/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
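/* Editorial note (not original driver text): the two helpers above go
 * through the PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA window in PCI config
 * space rather than a memory-mapped BAR, which is why they are usable
 * before the device mapping is fully set up; restoring the window to
 * PCICFG_VENDOR_ID_OFFSET afterwards leaves config space in a sane
 * state. An early-init read would look like the following sketch
 * (MISC_REG_CHIP_NUM is only an illustrative choice of register):
 *
 *	u32 id = bnx2x_reg_rd_ind(bp, MISC_REG_CHIP_NUM);
 */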

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
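/* Editorial note (not original driver text): DMAE command memory is an
 * array of sizeof(struct dmae_command) slots starting at
 * DMAE_REG_CMD_MEM; slot idx belongs to DMAE channel idx, and writing 1
 * to the matching DMAE_REG_GO_Cx register kicks the engine. The loop
 * above simply copies the command image one dword at a time, e.g. slot 3
 * starts at DMAE_REG_CMD_MEM + 3 * sizeof(struct dmae_command).
 */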

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
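/* Editorial note (not original driver text): completion is detected by
 * polling, not by interrupt. The command carries the bus address of the
 * slowpath wb_comp word as its completion address; the DMAE engine
 * writes DMAE_COMP_VAL there when the copy finishes, so the caller
 * clears the word, posts the command and spins (about 200 iterations of
 * 5us, stretched to 100ms steps on emulation/FPGA) until the magic
 * value appears or the retry budget runs out.
 */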

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
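/* Editorial note (not original driver text): "wide-bus" (wb) registers
 * are 64 bits wide and are accessed as a dword pair through DMAE, high
 * dword first, so both halves land in one transaction. Writing the
 * value 0x123456789abcdef0 to such a register would be, as a sketch:
 *
 *	bnx2x_wb_wr(bp, reg, 0x12345678, 0x9abcdef0);
 */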

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
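/* Editorial note (not original driver text): each STORM processor keeps
 * a fixed-size assert list in its internal memory; an entry is four
 * dwords (16 bytes) and the list terminates at the first entry whose
 * opcode dword still holds COMMON_ASM_INVALID_ASSERT_OPCODE, which is
 * why every loop above breaks on that value. The return value is the
 * total number of firmware asserts found across all four STORMs.
 */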

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
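/* Editorial note (not original driver text): the MCP firmware logs into
 * a circular buffer inside its scratchpad, and the dword at offset
 * 0xf104 is a "mark" pointing at the current write position as an
 * MCP-view address (hence the - 0x08000000 rebasing). The two loops
 * print the older half (mark to the end of the buffer) and then the
 * newer half (start of the buffer up to the mark), so the log comes
 * out in chronological order; data[8] = 0 just NUL-terminates each
 * 32-byte chunk for the "%s" printk.
 */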

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
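/* Editorial note (not original driver text): in the INTx branch the HC
 * config register appears to be written twice on purpose: the first
 * write briefly enables MSI/MSI-X together with the INTA line, and the
 * common REG_WR after the if/else then clears the MSI/MSI-X enable bit
 * again, leaving pure INTx. The MSI-X and MSI modes need only the
 * single final write.
 */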

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
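/* Editorial note (not original driver text): the quiesce order matters.
 * Bumping intr_sem first makes every ISR that fires from now on bail
 * out early; optionally masking the HC stops new hardware interrupts;
 * synchronize_irq() then waits for any handler already running on
 * another CPU; and finally the slowpath task is cancelled and the
 * workqueue flushed so nothing driver-side is left in flight.
 */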

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
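/* Editorial note (not original driver text): the ack is a single 32-bit
 * write that packs the status block id, the originating storm, the new
 * consumer index and the update/interrupt-mode flags into one
 * igu_ack_register image. A sketch of the "update consumer and
 * re-enable interrupts" pattern this driver uses elsewhere:
 *
 *	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
 *		     le16_to_cpu(fp->fp_u_idx), IGU_INT_ENABLE, 1);
 */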

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
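/* Editorial note (not original driver text): a transmitted packet
 * occupies a chain of buffer descriptors: the first (mapped) data bd,
 * whose nbd field counts the whole chain, then optionally a parse bd
 * when checksum/LSO offload flags are set and a split-header bd for
 * TSO, then one bd per page fragment. The walk above unmaps only the
 * bds that carry DMA mappings and skips the offload bds, which is why
 * nbd is decremented at each hop before advancing bd_idx.
 */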

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
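/* Editorial note (not original driver text) on the arithmetic:
 * SUB_S16(prod, cons) gives the number of bds in flight even across
 * 16-bit wrap-around, and NUM_TX_RINGS is added because one "next-page"
 * bd per ring page can never hold data. With illustrative numbers only:
 * tx_ring_size = 4096, prod = 100, cons = 60, NUM_TX_RINGS = 16 would
 * give used = 40 + 16 = 56 and 4096 - 56 = 4040 bds still available.
 */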

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
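/* Editorial note (not original driver text): "ramrods" are slowpath
 * commands posted to the firmware on the slow-path queue (hence the
 * spq_left accounting); the firmware answers with a completion CQE on
 * the receive completion ring, which is routed here. The
 * (command | state) switch pattern appears to rely on command ids and
 * state values occupying non-overlapping bit ranges, so each case
 * matches exactly one command arriving in one expected driver state.
 */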

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
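/* Editorial note (not original driver text): sge_mask is a bitmap with
 * one bit per SGE ring entry, grouped into 64-bit elements. A set bit
 * means the entry has not yet been consumed; bits are cleared as the
 * firmware uses pages for an aggregated packet. bnx2x_update_sge_prod()
 * advances rx_sge_prod only over fully-consumed elements (whole mask
 * words that read 0), refilling each to all-ones as it goes, so the
 * producer always moves in RX_SGE_MASK_ELEM_SZ steps.
 */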

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
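/* Editorial note (not original driver text): TPA (transparent packet
 * aggregation, the hardware LRO) works in per-queue "bins". On
 * TPA_START the skb that just received the first segment is parked in
 * tpa_pool[queue] - still DMA-mapped, since the firmware keeps
 * appending to the aggregation - and a fresh skb from the pool takes
 * its place on the bd ring so normal receives continue undisturbed.
 */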

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
1673static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1674{
555f6c78 1675 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1676 u16 status = bnx2x_ack_int(bp);
34f80b04 1677 u16 mask;
a2fbb9ea 1678
34f80b04 1679 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1680 if (unlikely(status == 0)) {
1681 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1682 return IRQ_NONE;
1683 }
34f80b04 1684 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
a2fbb9ea 1685
34f80b04 1686 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1687 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1688 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1689 return IRQ_HANDLED;
1690 }
1691
3196a88a
EG
1692#ifdef BNX2X_STOP_ON_ERROR
1693 if (unlikely(bp->panic))
1694 return IRQ_HANDLED;
1695#endif
1696
34f80b04
EG
1697 mask = 0x2 << bp->fp[0].sb_id;
1698 if (status & mask) {
a2fbb9ea
ET
1699 struct bnx2x_fastpath *fp = &bp->fp[0];
1700
1701 prefetch(fp->rx_cons_sb);
1702 prefetch(fp->tx_cons_sb);
1703 prefetch(&fp->status_blk->c_status_block.status_block_index);
1704 prefetch(&fp->status_blk->u_status_block.status_block_index);
1705
288379f0 1706 napi_schedule(&bnx2x_fp(bp, 0, napi));
a2fbb9ea 1707
34f80b04 1708 status &= ~mask;
a2fbb9ea
ET
1709 }
1710
a2fbb9ea 1711
34f80b04 1712 if (unlikely(status & 0x1)) {
1cf167f2 1713 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1714
1715 status &= ~0x1;
1716 if (!status)
1717 return IRQ_HANDLED;
1718 }
1719
34f80b04
EG
1720 if (status)
1721 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1722 status);
a2fbb9ea 1723
c18487ee 1724 return IRQ_HANDLED;
a2fbb9ea
ET
1725}
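/* Editorial note (not original driver text): bnx2x_ack_int() returns
 * the SIMD interrupt status read from the HC: bit 0 signals the
 * default/slow-path status block (hence the sp_task scheduling) and
 * bit (1 + sb_id), i.e. 0x2 << sb_id, signals fastpath status block
 * sb_id. Each serviced source is cleared from the local copy so
 * anything left over is flagged as an unknown interrupt.
 */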
1726
c18487ee 1727/* end of fast path */
a2fbb9ea 1728
bb2a0f7a 1729static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1730
c18487ee
YR
1731/* Link */
1732
1733/*
1734 * General service functions
1735 */
a2fbb9ea 1736
4a37fb66 1737static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1738{
1739 u32 lock_status;
1740 u32 resource_bit = (1 << resource);
4a37fb66
YG
1741 int func = BP_FUNC(bp);
1742 u32 hw_lock_control_reg;
c18487ee 1743 int cnt;
a2fbb9ea 1744
c18487ee
YR
1745 /* Validating that the resource is within range */
1746 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1747 DP(NETIF_MSG_HW,
1748 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1749 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1750 return -EINVAL;
1751 }
a2fbb9ea 1752
4a37fb66
YG
1753 if (func <= 5) {
1754 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1755 } else {
1756 hw_lock_control_reg =
1757 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1758 }
1759
c18487ee 1760 /* Validating that the resource is not already taken */
4a37fb66 1761 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1762 if (lock_status & resource_bit) {
1763 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1764 lock_status, resource_bit);
1765 return -EEXIST;
1766 }
a2fbb9ea 1767
46230476
EG
 1768 /* Try for 5 seconds, polling every 5 ms */
1769 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1770 /* Try to acquire the lock */
4a37fb66
YG
1771 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1772 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1773 if (lock_status & resource_bit)
1774 return 0;
a2fbb9ea 1775
c18487ee 1776 msleep(5);
a2fbb9ea 1777 }
c18487ee
YR
1778 DP(NETIF_MSG_HW, "Timeout\n");
1779 return -EAGAIN;
1780}
a2fbb9ea 1781
4a37fb66 1782static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1783{
1784 u32 lock_status;
1785 u32 resource_bit = (1 << resource);
4a37fb66
YG
1786 int func = BP_FUNC(bp);
1787 u32 hw_lock_control_reg;
a2fbb9ea 1788
c18487ee
YR
1789 /* Validating that the resource is within range */
1790 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1791 DP(NETIF_MSG_HW,
1792 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1793 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1794 return -EINVAL;
1795 }
1796
4a37fb66
YG
1797 if (func <= 5) {
1798 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1799 } else {
1800 hw_lock_control_reg =
1801 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1802 }
1803
c18487ee 1804 /* Validating that the resource is currently taken */
4a37fb66 1805 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1806 if (!(lock_status & resource_bit)) {
1807 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1808 lock_status, resource_bit);
1809 return -EFAULT;
a2fbb9ea
ET
1810 }
1811
4a37fb66 1812 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1813 return 0;
1814}
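/* Usage sketch (illustrative, not part of the original source): callers
 * bracket accesses to resources shared across functions/ports with the
 * acquire/release pair above, e.g.
 *
 *	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO) == 0) {
 *		... access MISC_REG_GPIO ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 *
 * -EEXIST from acquire means the resource bit is already set for this
 * function; -EAGAIN means another owner held it for the whole 5 second
 * polling window.
 */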
1815
1816/* HW Lock for shared dual port PHYs */
4a37fb66 1817static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1818{
34f80b04 1819 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1820
46c6a674
EG
1821 if (bp->port.need_hw_lock)
1822 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1823}
a2fbb9ea 1824
4a37fb66 1825static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1826{
46c6a674
EG
1827 if (bp->port.need_hw_lock)
1828 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1829
34f80b04 1830 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1831}
a2fbb9ea 1832
4acac6a5
EG
1833int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1834{
1835 /* The GPIO should be swapped if swap register is set and active */
1836 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1837 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1838 int gpio_shift = gpio_num +
1839 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1840 u32 gpio_mask = (1 << gpio_shift);
1841 u32 gpio_reg;
1842 int value;
1843
1844 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1845 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1846 return -EINVAL;
1847 }
1848
1849 /* read GPIO value */
1850 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1851
1852 /* get the requested pin value */
1853 if ((gpio_reg & gpio_mask) == gpio_mask)
1854 value = 1;
1855 else
1856 value = 0;
1857
1858 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1859
1860 return value;
1861}
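/* Worked example of the swap logic above (illustrative): with both
 * NIG_REG_PORT_SWAP and NIG_REG_STRAP_OVERRIDE set on port 0, gpio_port
 * evaluates to 1 ^ 0 = 1, so GPIO 2 is read from bit
 * (2 + MISC_REGISTERS_GPIO_PORT_SHIFT) of MISC_REG_GPIO instead of bit 2.
 */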
1862
17de50b7 1863int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1864{
1865 /* The GPIO should be swapped if swap register is set and active */
1866 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1867 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1868 int gpio_shift = gpio_num +
1869 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1870 u32 gpio_mask = (1 << gpio_shift);
1871 u32 gpio_reg;
a2fbb9ea 1872
c18487ee
YR
1873 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1874 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1875 return -EINVAL;
1876 }
a2fbb9ea 1877
4a37fb66 1878 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1879 /* read GPIO and mask except the float bits */
1880 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1881
c18487ee
YR
1882 switch (mode) {
1883 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1884 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1885 gpio_num, gpio_shift);
1886 /* clear FLOAT and set CLR */
1887 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1888 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1889 break;
a2fbb9ea 1890
c18487ee
YR
1891 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1892 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1893 gpio_num, gpio_shift);
1894 /* clear FLOAT and set SET */
1895 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1896 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1897 break;
a2fbb9ea 1898
17de50b7 1899 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1900 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1901 gpio_num, gpio_shift);
1902 /* set FLOAT */
1903 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1904 break;
a2fbb9ea 1905
c18487ee
YR
1906 default:
1907 break;
a2fbb9ea
ET
1908 }
1909
c18487ee 1910 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1911 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1912
c18487ee 1913 return 0;
a2fbb9ea
ET
1914}
1915
4acac6a5
EG
1916int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1917{
1918 /* The GPIO should be swapped if swap register is set and active */
1919 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1920 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1921 int gpio_shift = gpio_num +
1922 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1923 u32 gpio_mask = (1 << gpio_shift);
1924 u32 gpio_reg;
1925
1926 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1927 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1928 return -EINVAL;
1929 }
1930
1931 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1932 /* read GPIO int */
1933 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1934
1935 switch (mode) {
1936 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1937 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1938 "output low\n", gpio_num, gpio_shift);
1939 /* clear SET and set CLR */
1940 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1941 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1942 break;
1943
1944 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1945 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1946 "output high\n", gpio_num, gpio_shift);
1947 /* clear CLR and set SET */
1948 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1949 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1950 break;
1951
1952 default:
1953 break;
1954 }
1955
1956 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1957 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1958
1959 return 0;
1960}
1961
c18487ee 1962static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1963{
c18487ee
YR
1964 u32 spio_mask = (1 << spio_num);
1965 u32 spio_reg;
a2fbb9ea 1966
c18487ee
YR
1967 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1968 (spio_num > MISC_REGISTERS_SPIO_7)) {
1969 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1970 return -EINVAL;
a2fbb9ea
ET
1971 }
1972
4a37fb66 1973 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1974 /* read SPIO and mask except the float bits */
1975 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1976
c18487ee 1977 switch (mode) {
6378c025 1978 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1979 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1980 /* clear FLOAT and set CLR */
1981 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1982 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1983 break;
a2fbb9ea 1984
6378c025 1985 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1986 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1987 /* clear FLOAT and set SET */
1988 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1989 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1990 break;
a2fbb9ea 1991
c18487ee
YR
1992 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1993 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1994 /* set FLOAT */
1995 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1996 break;
a2fbb9ea 1997
c18487ee
YR
1998 default:
1999 break;
a2fbb9ea
ET
2000 }
2001
c18487ee 2002 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2003 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2004
a2fbb9ea
ET
2005 return 0;
2006}
2007
c18487ee 2008static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2009{
ad33ea3a
EG
2010 switch (bp->link_vars.ieee_fc &
2011 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2012 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2013 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2014 ADVERTISED_Pause);
2015 break;
2016 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2017 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2018 ADVERTISED_Pause);
2019 break;
2020 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2021 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
2022 break;
2023 default:
34f80b04 2024 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2025 ADVERTISED_Pause);
2026 break;
2027 }
2028}
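/* Summary of the mapping above (for reference): PAUSE_NONE advertises
 * neither flag, PAUSE_BOTH advertises Pause | Asym_Pause, and
 * PAUSE_ASYMMETRIC advertises Asym_Pause only; anything else falls back
 * to advertising neither.
 */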
f1410647 2029
c18487ee
YR
2030static void bnx2x_link_report(struct bnx2x *bp)
2031{
2032 if (bp->link_vars.link_up) {
2033 if (bp->state == BNX2X_STATE_OPEN)
2034 netif_carrier_on(bp->dev);
2035 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2036
c18487ee 2037 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2038
c18487ee
YR
2039 if (bp->link_vars.duplex == DUPLEX_FULL)
2040 printk("full duplex");
2041 else
2042 printk("half duplex");
f1410647 2043
c0700f90
DM
2044 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2045 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2046 printk(", receive ");
c0700f90 2047 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2048 printk("& transmit ");
2049 } else {
2050 printk(", transmit ");
2051 }
2052 printk("flow control ON");
2053 }
2054 printk("\n");
f1410647 2055
c18487ee
YR
2056 } else { /* link_down */
2057 netif_carrier_off(bp->dev);
2058 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2059 }
c18487ee
YR
2060}
2061
2062static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
2063{
19680c48
EG
2064 if (!BP_NOMCP(bp)) {
2065 u8 rc;
a2fbb9ea 2066
19680c48 2067 /* Initialize link parameters structure variables */
8c99e7b0
YR
2068 /* It is recommended to turn off RX FC for jumbo frames
2069 for better performance */
2070 if (IS_E1HMF(bp))
c0700f90 2071 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2072 else if (bp->dev->mtu > 5000)
c0700f90 2073 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2074 else
c0700f90 2075 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2076
4a37fb66 2077 bnx2x_acquire_phy_lock(bp);
19680c48 2078 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2079 bnx2x_release_phy_lock(bp);
a2fbb9ea 2080
3c96c68b
EG
2081 bnx2x_calc_fc_adv(bp);
2082
19680c48
EG
2083 if (bp->link_vars.link_up)
2084 bnx2x_link_report(bp);
a2fbb9ea 2085
34f80b04 2086
19680c48
EG
2087 return rc;
2088 }
 2089 BNX2X_ERR("Bootcode is missing - not initializing link\n");
2090 return -EINVAL;
a2fbb9ea
ET
2091}
2092
c18487ee 2093static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2094{
19680c48 2095 if (!BP_NOMCP(bp)) {
4a37fb66 2096 bnx2x_acquire_phy_lock(bp);
19680c48 2097 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2098 bnx2x_release_phy_lock(bp);
a2fbb9ea 2099
19680c48
EG
2100 bnx2x_calc_fc_adv(bp);
2101 } else
 2102 BNX2X_ERR("Bootcode is missing - not setting link\n");
c18487ee 2103}
a2fbb9ea 2104
c18487ee
YR
2105static void bnx2x__link_reset(struct bnx2x *bp)
2106{
19680c48 2107 if (!BP_NOMCP(bp)) {
4a37fb66 2108 bnx2x_acquire_phy_lock(bp);
589abe3a 2109 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2110 bnx2x_release_phy_lock(bp);
19680c48
EG
2111 } else
 2112 BNX2X_ERR("Bootcode is missing - not resetting link\n");
c18487ee 2113}
a2fbb9ea 2114
c18487ee
YR
2115static u8 bnx2x_link_test(struct bnx2x *bp)
2116{
2117 u8 rc;
a2fbb9ea 2118
4a37fb66 2119 bnx2x_acquire_phy_lock(bp);
c18487ee 2120 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2121 bnx2x_release_phy_lock(bp);
a2fbb9ea 2122
c18487ee
YR
2123 return rc;
2124}
a2fbb9ea 2125
8a1c38d1 2126static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2127{
8a1c38d1
EG
2128 u32 r_param = bp->link_vars.line_speed / 8;
2129 u32 fair_periodic_timeout_usec;
2130 u32 t_fair;
34f80b04 2131
8a1c38d1
EG
2132 memset(&(bp->cmng.rs_vars), 0,
2133 sizeof(struct rate_shaping_vars_per_port));
2134 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2135
8a1c38d1
EG
2136 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2137 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2138
8a1c38d1
EG
 2139 /* this is the threshold below which no timer arming will occur;
 2140 the 1.25 coefficient makes the threshold a little bigger
 2141 than the real time, to compensate for timer inaccuracy */
2142 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2143 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2144
8a1c38d1
EG
2145 /* resolution of fairness timer */
2146 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
 2147 /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2148 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2149
8a1c38d1
EG
2150 /* this is the threshold below which we won't arm the timer anymore */
2151 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2152
8a1c38d1
EG
 2153 /* we multiply by 1e3/8 to get bytes/msec.
 2154 We don't want the credits to exceed
 2155 t_fair*FAIR_MEM (the algorithm resolution) */
2156 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2157 /* since each tick is 4 usec */
2158 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2159}
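/* Worked example (assuming line_speed is in Mbps): at 10000 Mbps,
 * r_param = 10000/8 = 1250 bytes/usec and t_fair = T_FAIR_COEF/10000,
 * i.e. the 1000 usec quoted above, so upper_bound comes out as
 * r_param * t_fair * FAIR_MEM bytes of accumulated credit.
 */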
2160
8a1c38d1 2161static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2162{
2163 struct rate_shaping_vars_per_vn m_rs_vn;
2164 struct fairness_vars_per_vn m_fair_vn;
2165 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2166 u16 vn_min_rate, vn_max_rate;
2167 int i;
2168
2169 /* If function is hidden - set min and max to zeroes */
2170 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2171 vn_min_rate = 0;
2172 vn_max_rate = 0;
2173
2174 } else {
2175 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2176 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2177 /* If fairness is enabled (not all min rates are zeroes) and
34f80b04 2178 the current min rate is zero, set it to the default minimum.
33471629 2179 This is a requirement of the algorithm. */
8a1c38d1 2180 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2181 vn_min_rate = DEF_MIN_RATE;
2182 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2183 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2184 }
2185
8a1c38d1
EG
2186 DP(NETIF_MSG_IFUP,
2187 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2188 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2189
2190 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2191 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2192
2193 /* global vn counter - maximal Mbps for this vn */
2194 m_rs_vn.vn_counter.rate = vn_max_rate;
2195
2196 /* quota - number of bytes transmitted in this period */
2197 m_rs_vn.vn_counter.quota =
2198 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2199
8a1c38d1 2200 if (bp->vn_weight_sum) {
34f80b04
EG
2201 /* credit for each period of the fairness algorithm:
 2202 number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
2203 vn_weight_sum should not be larger than 10000, thus
2204 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2205 than zero */
34f80b04 2206 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2207 max((u32)(vn_min_rate * (T_FAIR_COEF /
2208 (8 * bp->vn_weight_sum))),
2209 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2210 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2211 m_fair_vn.vn_credit_delta);
2212 }
2213
34f80b04
EG
2214 /* Store it to internal memory */
2215 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2216 REG_WR(bp, BAR_XSTRORM_INTMEM +
2217 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2218 ((u32 *)(&m_rs_vn))[i]);
2219
2220 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2221 REG_WR(bp, BAR_XSTRORM_INTMEM +
2222 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2223 ((u32 *)(&m_fair_vn))[i]);
2224}
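/* Example of the BW decoding above (illustrative): a MIN_BW field of 10
 * in func_mf_config yields vn_min_rate = 10 * 100 = 1000 Mbps, and the
 * quota works out to (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8 bytes
 * per rate-shaping period.
 */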
2225
8a1c38d1 2226
c18487ee
YR
2227/* This function is called upon link interrupt */
2228static void bnx2x_link_attn(struct bnx2x *bp)
2229{
bb2a0f7a
YG
2230 /* Make sure that we are synced with the current statistics */
2231 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2232
c18487ee 2233 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2234
bb2a0f7a
YG
2235 if (bp->link_vars.link_up) {
2236
1c06328c
EG
2237 /* dropless flow control */
2238 if (CHIP_IS_E1H(bp)) {
2239 int port = BP_PORT(bp);
2240 u32 pause_enabled = 0;
2241
2242 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2243 pause_enabled = 1;
2244
2245 REG_WR(bp, BAR_USTRORM_INTMEM +
2246 USTORM_PAUSE_ENABLED_OFFSET(port),
2247 pause_enabled);
2248 }
2249
bb2a0f7a
YG
2250 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2251 struct host_port_stats *pstats;
2252
2253 pstats = bnx2x_sp(bp, port_stats);
2254 /* reset old bmac stats */
2255 memset(&(pstats->mac_stx[0]), 0,
2256 sizeof(struct mac_stx));
2257 }
2258 if ((bp->state == BNX2X_STATE_OPEN) ||
2259 (bp->state == BNX2X_STATE_DISABLED))
2260 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2261 }
2262
c18487ee
YR
2263 /* indicate link status */
2264 bnx2x_link_report(bp);
34f80b04
EG
2265
2266 if (IS_E1HMF(bp)) {
8a1c38d1 2267 int port = BP_PORT(bp);
34f80b04 2268 int func;
8a1c38d1 2269 int vn;
34f80b04
EG
2270
2271 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2272 if (vn == BP_E1HVN(bp))
2273 continue;
2274
8a1c38d1 2275 func = ((vn << 1) | port);
34f80b04
EG
2276
2277 /* Set the attention towards other drivers
2278 on the same port */
2279 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2280 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2281 }
34f80b04 2282
8a1c38d1
EG
2283 if (bp->link_vars.link_up) {
2284 int i;
2285
2286 /* Init rate shaping and fairness contexts */
2287 bnx2x_init_port_minmax(bp);
34f80b04 2288
34f80b04 2289 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2290 bnx2x_init_vn_minmax(bp, 2*vn + port);
2291
2292 /* Store it to internal memory */
2293 for (i = 0;
2294 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2295 REG_WR(bp, BAR_XSTRORM_INTMEM +
2296 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2297 ((u32 *)(&bp->cmng))[i]);
2298 }
34f80b04 2299 }
c18487ee 2300}
a2fbb9ea 2301
c18487ee
YR
2302static void bnx2x__link_status_update(struct bnx2x *bp)
2303{
2304 if (bp->state != BNX2X_STATE_OPEN)
2305 return;
a2fbb9ea 2306
c18487ee 2307 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2308
bb2a0f7a
YG
2309 if (bp->link_vars.link_up)
2310 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2311 else
2312 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2313
c18487ee
YR
2314 /* indicate link status */
2315 bnx2x_link_report(bp);
a2fbb9ea 2316}
a2fbb9ea 2317
34f80b04
EG
2318static void bnx2x_pmf_update(struct bnx2x *bp)
2319{
2320 int port = BP_PORT(bp);
2321 u32 val;
2322
2323 bp->port.pmf = 1;
2324 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2325
2326 /* enable nig attention */
2327 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2328 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2329 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2330
2331 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2332}
2333
c18487ee 2334/* end of Link */
a2fbb9ea
ET
2335
2336/* slow path */
2337
2338/*
2339 * General service functions
2340 */
2341
2342/* the slow path queue is odd since completions arrive on the fastpath ring */
2343static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2344 u32 data_hi, u32 data_lo, int common)
2345{
34f80b04 2346 int func = BP_FUNC(bp);
a2fbb9ea 2347
34f80b04
EG
2348 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2349 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2350 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2351 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2352 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2353
2354#ifdef BNX2X_STOP_ON_ERROR
2355 if (unlikely(bp->panic))
2356 return -EIO;
2357#endif
2358
34f80b04 2359 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2360
2361 if (!bp->spq_left) {
2362 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2363 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2364 bnx2x_panic();
2365 return -EBUSY;
2366 }
f1410647 2367
a2fbb9ea
ET
 2368 /* CID needs the port number to be encoded in it */
2369 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2370 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2371 HW_CID(bp, cid)));
2372 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2373 if (common)
2374 bp->spq_prod_bd->hdr.type |=
2375 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2376
2377 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2378 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2379
2380 bp->spq_left--;
2381
2382 if (bp->spq_prod_bd == bp->spq_last_bd) {
2383 bp->spq_prod_bd = bp->spq;
2384 bp->spq_prod_idx = 0;
2385 DP(NETIF_MSG_TIMER, "end of spq\n");
2386
2387 } else {
2388 bp->spq_prod_bd++;
2389 bp->spq_prod_idx++;
2390 }
2391
34f80b04 2392 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2393 bp->spq_prod_idx);
2394
34f80b04 2395 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2396 return 0;
2397}
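/* Usage note: see bnx2x_storm_stats_post() below for a typical caller -
 * it posts RAMROD_CMD_ID_ETH_STAT_QUERY with the ramrod data packed into
 * data_hi/data_lo and common = 0.
 */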
2398
2399/* acquire split MCP access lock register */
4a37fb66 2400static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2401{
a2fbb9ea 2402 u32 i, j, val;
34f80b04 2403 int rc = 0;
a2fbb9ea
ET
2404
2405 might_sleep();
2406 i = 100;
2407 for (j = 0; j < i*10; j++) {
2408 val = (1UL << 31);
2409 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2410 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2411 if (val & (1L << 31))
2412 break;
2413
2414 msleep(5);
2415 }
a2fbb9ea 2416 if (!(val & (1L << 31))) {
19680c48 2417 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2418 rc = -EBUSY;
2419 }
2420
2421 return rc;
2422}
2423
4a37fb66
YG
2424/* release split MCP access lock register */
2425static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2426{
2427 u32 val = 0;
2428
2429 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2430}
2431
2432static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2433{
2434 struct host_def_status_block *def_sb = bp->def_status_blk;
2435 u16 rc = 0;
2436
2437 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2438 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2439 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2440 rc |= 1;
2441 }
2442 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2443 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2444 rc |= 2;
2445 }
2446 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2447 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2448 rc |= 4;
2449 }
2450 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2451 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2452 rc |= 8;
2453 }
2454 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2455 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2456 rc |= 16;
2457 }
2458 return rc;
2459}
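/* The returned bitmask encodes which default-SB indices moved:
 * bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM. bnx2x_sp_task() below keys the
 * HW attention handling off bit 0.
 */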
2460
2461/*
2462 * slow path service functions
2463 */
2464
2465static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2466{
34f80b04 2467 int port = BP_PORT(bp);
5c862848
EG
2468 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2469 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2470 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2471 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2472 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2473 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2474 u32 aeu_mask;
87942b46 2475 u32 nig_mask = 0;
a2fbb9ea 2476
a2fbb9ea
ET
2477 if (bp->attn_state & asserted)
2478 BNX2X_ERR("IGU ERROR\n");
2479
3fcaf2e5
EG
2480 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2481 aeu_mask = REG_RD(bp, aeu_addr);
2482
a2fbb9ea 2483 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2484 aeu_mask, asserted);
2485 aeu_mask &= ~(asserted & 0xff);
2486 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2487
3fcaf2e5
EG
2488 REG_WR(bp, aeu_addr, aeu_mask);
2489 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2490
3fcaf2e5 2491 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2492 bp->attn_state |= asserted;
3fcaf2e5 2493 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2494
2495 if (asserted & ATTN_HARD_WIRED_MASK) {
2496 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2497
a5e9a7cf
EG
2498 bnx2x_acquire_phy_lock(bp);
2499
877e9aa4 2500 /* save nig interrupt mask */
87942b46 2501 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2502 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2503
c18487ee 2504 bnx2x_link_attn(bp);
a2fbb9ea
ET
2505
2506 /* handle unicore attn? */
2507 }
2508 if (asserted & ATTN_SW_TIMER_4_FUNC)
2509 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2510
2511 if (asserted & GPIO_2_FUNC)
2512 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2513
2514 if (asserted & GPIO_3_FUNC)
2515 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2516
2517 if (asserted & GPIO_4_FUNC)
2518 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2519
2520 if (port == 0) {
2521 if (asserted & ATTN_GENERAL_ATTN_1) {
2522 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2523 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2524 }
2525 if (asserted & ATTN_GENERAL_ATTN_2) {
2526 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2527 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2528 }
2529 if (asserted & ATTN_GENERAL_ATTN_3) {
2530 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2531 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2532 }
2533 } else {
2534 if (asserted & ATTN_GENERAL_ATTN_4) {
2535 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2536 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2537 }
2538 if (asserted & ATTN_GENERAL_ATTN_5) {
2539 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2540 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2541 }
2542 if (asserted & ATTN_GENERAL_ATTN_6) {
2543 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2544 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2545 }
2546 }
2547
2548 } /* if hardwired */
2549
5c862848
EG
2550 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2551 asserted, hc_addr);
2552 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2553
2554 /* now set back the mask */
a5e9a7cf 2555 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2556 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2557 bnx2x_release_phy_lock(bp);
2558 }
a2fbb9ea
ET
2559}
2560
877e9aa4 2561static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2562{
34f80b04 2563 int port = BP_PORT(bp);
877e9aa4
ET
2564 int reg_offset;
2565 u32 val;
2566
34f80b04
EG
2567 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2568 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2569
34f80b04 2570 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2571
2572 val = REG_RD(bp, reg_offset);
2573 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2574 REG_WR(bp, reg_offset, val);
2575
2576 BNX2X_ERR("SPIO5 hw attention\n");
2577
35b19ba5
EG
2578 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2579 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
877e9aa4
ET
2580 /* Fan failure attention */
2581
17de50b7 2582 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2583 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2584 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2585 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2586 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2587 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2588 /* mark the failure */
c18487ee 2589 bp->link_params.ext_phy_config &=
877e9aa4 2590 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2591 bp->link_params.ext_phy_config |=
877e9aa4
ET
2592 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2593 SHMEM_WR(bp,
2594 dev_info.port_hw_config[port].
2595 external_phy_config,
c18487ee 2596 bp->link_params.ext_phy_config);
877e9aa4
ET
2597 /* log the failure */
2598 printk(KERN_ERR PFX "Fan Failure on Network"
2599 " Controller %s has caused the driver to"
2600 " shutdown the card to prevent permanent"
2601 " damage. Please contact Dell Support for"
2602 " assistance\n", bp->dev->name);
2603 break;
2604
2605 default:
2606 break;
2607 }
2608 }
34f80b04 2609
589abe3a
EG
2610 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2611 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2612 bnx2x_acquire_phy_lock(bp);
2613 bnx2x_handle_module_detect_int(&bp->link_params);
2614 bnx2x_release_phy_lock(bp);
2615 }
2616
34f80b04
EG
2617 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2618
2619 val = REG_RD(bp, reg_offset);
2620 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2621 REG_WR(bp, reg_offset, val);
2622
2623 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2624 (attn & HW_INTERRUT_ASSERT_SET_0));
2625 bnx2x_panic();
2626 }
877e9aa4
ET
2627}
2628
2629static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2630{
2631 u32 val;
2632
2633 if (attn & BNX2X_DOORQ_ASSERT) {
2634
2635 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2636 BNX2X_ERR("DB hw attention 0x%x\n", val);
2637 /* DORQ discard attention */
2638 if (val & 0x2)
2639 BNX2X_ERR("FATAL error from DORQ\n");
2640 }
34f80b04
EG
2641
2642 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2643
2644 int port = BP_PORT(bp);
2645 int reg_offset;
2646
2647 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2648 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2649
2650 val = REG_RD(bp, reg_offset);
2651 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2652 REG_WR(bp, reg_offset, val);
2653
2654 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2655 (attn & HW_INTERRUT_ASSERT_SET_1));
2656 bnx2x_panic();
2657 }
877e9aa4
ET
2658}
2659
2660static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2661{
2662 u32 val;
2663
2664 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2665
2666 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2667 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2668 /* CFC error attention */
2669 if (val & 0x2)
2670 BNX2X_ERR("FATAL error from CFC\n");
2671 }
2672
2673 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2674
2675 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2676 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2677 /* RQ_USDMDP_FIFO_OVERFLOW */
2678 if (val & 0x18000)
2679 BNX2X_ERR("FATAL error from PXP\n");
2680 }
34f80b04
EG
2681
2682 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2683
2684 int port = BP_PORT(bp);
2685 int reg_offset;
2686
2687 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2688 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2689
2690 val = REG_RD(bp, reg_offset);
2691 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2692 REG_WR(bp, reg_offset, val);
2693
2694 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2695 (attn & HW_INTERRUT_ASSERT_SET_2));
2696 bnx2x_panic();
2697 }
877e9aa4
ET
2698}
2699
2700static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2701{
34f80b04
EG
2702 u32 val;
2703
877e9aa4
ET
2704 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2705
34f80b04
EG
2706 if (attn & BNX2X_PMF_LINK_ASSERT) {
2707 int func = BP_FUNC(bp);
2708
2709 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2710 bnx2x__link_status_update(bp);
2711 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2712 DRV_STATUS_PMF)
2713 bnx2x_pmf_update(bp);
2714
2715 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2716
2717 BNX2X_ERR("MC assert!\n");
2718 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2719 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2720 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2721 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2722 bnx2x_panic();
2723
2724 } else if (attn & BNX2X_MCP_ASSERT) {
2725
2726 BNX2X_ERR("MCP assert!\n");
2727 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2728 bnx2x_fw_dump(bp);
877e9aa4
ET
2729
2730 } else
2731 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2732 }
2733
2734 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2735 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2736 if (attn & BNX2X_GRC_TIMEOUT) {
2737 val = CHIP_IS_E1H(bp) ?
2738 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2739 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2740 }
2741 if (attn & BNX2X_GRC_RSV) {
2742 val = CHIP_IS_E1H(bp) ?
2743 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2744 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2745 }
877e9aa4 2746 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2747 }
2748}
2749
2750static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2751{
a2fbb9ea
ET
2752 struct attn_route attn;
2753 struct attn_route group_mask;
34f80b04 2754 int port = BP_PORT(bp);
877e9aa4 2755 int index;
a2fbb9ea
ET
2756 u32 reg_addr;
2757 u32 val;
3fcaf2e5 2758 u32 aeu_mask;
a2fbb9ea
ET
2759
2760 /* need to take HW lock because MCP or other port might also
2761 try to handle this event */
4a37fb66 2762 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2763
2764 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2765 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2766 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2767 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2768 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2769 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2770
2771 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2772 if (deasserted & (1 << index)) {
2773 group_mask = bp->attn_group[index];
2774
34f80b04
EG
2775 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2776 index, group_mask.sig[0], group_mask.sig[1],
2777 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2778
877e9aa4
ET
2779 bnx2x_attn_int_deasserted3(bp,
2780 attn.sig[3] & group_mask.sig[3]);
2781 bnx2x_attn_int_deasserted1(bp,
2782 attn.sig[1] & group_mask.sig[1]);
2783 bnx2x_attn_int_deasserted2(bp,
2784 attn.sig[2] & group_mask.sig[2]);
2785 bnx2x_attn_int_deasserted0(bp,
2786 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2787
a2fbb9ea
ET
2788 if ((attn.sig[0] & group_mask.sig[0] &
2789 HW_PRTY_ASSERT_SET_0) ||
2790 (attn.sig[1] & group_mask.sig[1] &
2791 HW_PRTY_ASSERT_SET_1) ||
2792 (attn.sig[2] & group_mask.sig[2] &
2793 HW_PRTY_ASSERT_SET_2))
6378c025 2794 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2795 }
2796 }
2797
4a37fb66 2798 bnx2x_release_alr(bp);
a2fbb9ea 2799
5c862848 2800 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2801
2802 val = ~deasserted;
3fcaf2e5
EG
2803 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2804 val, reg_addr);
5c862848 2805 REG_WR(bp, reg_addr, val);
a2fbb9ea 2806
a2fbb9ea 2807 if (~bp->attn_state & deasserted)
3fcaf2e5 2808 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2809
2810 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2811 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2812
3fcaf2e5
EG
2813 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2814 aeu_mask = REG_RD(bp, reg_addr);
2815
2816 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2817 aeu_mask, deasserted);
2818 aeu_mask |= (deasserted & 0xff);
2819 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2820
3fcaf2e5
EG
2821 REG_WR(bp, reg_addr, aeu_mask);
2822 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2823
2824 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2825 bp->attn_state &= ~deasserted;
2826 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2827}
2828
2829static void bnx2x_attn_int(struct bnx2x *bp)
2830{
2831 /* read local copy of bits */
68d59484
EG
2832 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2833 attn_bits);
2834 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2835 attn_bits_ack);
a2fbb9ea
ET
2836 u32 attn_state = bp->attn_state;
2837
2838 /* look for changed bits */
2839 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2840 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2841
2842 DP(NETIF_MSG_HW,
2843 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2844 attn_bits, attn_ack, asserted, deasserted);
2845
2846 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2847 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2848
2849 /* handle bits that were raised */
2850 if (asserted)
2851 bnx2x_attn_int_asserted(bp, asserted);
2852
2853 if (deasserted)
2854 bnx2x_attn_int_deasserted(bp, deasserted);
2855}
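/* Truth table for the edge detection above (for reference):
 * asserted   = attn_bits & ~attn_ack & ~attn_state   (newly raised)
 * deasserted = ~attn_bits & attn_ack & attn_state    (newly cleared)
 * Any other disagreement between bits, ack and state is reported as a
 * BAD attention state.
 */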
2856
2857static void bnx2x_sp_task(struct work_struct *work)
2858{
1cf167f2 2859 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2860 u16 status;
2861
34f80b04 2862
a2fbb9ea
ET
2863 /* Return here if interrupt is disabled */
2864 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2865 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2866 return;
2867 }
2868
2869 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2870/* if (status == 0) */
2871/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2872
3196a88a 2873 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2874
877e9aa4
ET
2875 /* HW attentions */
2876 if (status & 0x1)
a2fbb9ea 2877 bnx2x_attn_int(bp);
a2fbb9ea 2878
68d59484 2879 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2880 IGU_INT_NOP, 1);
2881 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2882 IGU_INT_NOP, 1);
2883 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2884 IGU_INT_NOP, 1);
2885 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2886 IGU_INT_NOP, 1);
2887 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2888 IGU_INT_ENABLE, 1);
877e9aa4 2889
a2fbb9ea
ET
2890}
2891
2892static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2893{
2894 struct net_device *dev = dev_instance;
2895 struct bnx2x *bp = netdev_priv(dev);
2896
2897 /* Return here if interrupt is disabled */
2898 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2899 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2900 return IRQ_HANDLED;
2901 }
2902
8d9c5f34 2903 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2904
2905#ifdef BNX2X_STOP_ON_ERROR
2906 if (unlikely(bp->panic))
2907 return IRQ_HANDLED;
2908#endif
2909
1cf167f2 2910 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2911
2912 return IRQ_HANDLED;
2913}
2914
2915/* end of slow path */
2916
2917/* Statistics */
2918
2919/****************************************************************************
2920* Macros
2921****************************************************************************/
2922
a2fbb9ea
ET
2923/* sum[hi:lo] += add[hi:lo] */
2924#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2925 do { \
2926 s_lo += a_lo; \
f5ba6772 2927 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
2928 } while (0)
2929
2930/* difference = minuend - subtrahend */
2931#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2932 do { \
bb2a0f7a
YG
2933 if (m_lo < s_lo) { \
2934 /* underflow */ \
a2fbb9ea 2935 d_hi = m_hi - s_hi; \
bb2a0f7a 2936 if (d_hi > 0) { \
6378c025 2937 /* we can 'borrow' 1 */ \
a2fbb9ea
ET
2938 d_hi--; \
2939 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2940 } else { \
6378c025 2941 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2942 d_hi = 0; \
2943 d_lo = 0; \
2944 } \
bb2a0f7a
YG
2945 } else { \
2946 /* m_lo >= s_lo */ \
a2fbb9ea 2947 if (m_hi < s_hi) { \
bb2a0f7a
YG
2948 d_hi = 0; \
2949 d_lo = 0; \
2950 } else { \
6378c025 2951 /* m_hi >= s_hi */ \
bb2a0f7a
YG
2952 d_hi = m_hi - s_hi; \
2953 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2954 } \
2955 } \
2956 } while (0)
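/* Worked example for DIFF_64 (illustrative): m = 0x1:0x00000005,
 * s = 0x0:0x00000007; m_lo < s_lo and d_hi = 1 > 0, so after the borrow
 * d_hi = 0 and d_lo = 5 + (UINT_MAX - 7) + 1 = 0xFFFFFFFE.
 */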
2957
bb2a0f7a 2958#define UPDATE_STAT64(s, t) \
a2fbb9ea 2959 do { \
bb2a0f7a
YG
2960 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2961 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2962 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2963 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2964 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2965 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2966 } while (0)
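/* Note on the two mac_stx slots (as used above): mac_stx[0] keeps the
 * raw last-seen MAC counters so the next delta can be computed, while
 * mac_stx[1] accumulates the deltas into the running totals.
 */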
2967
bb2a0f7a 2968#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2969 do { \
bb2a0f7a
YG
2970 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2971 diff.lo, new->s##_lo, old->s##_lo); \
2972 ADD_64(estats->t##_hi, diff.hi, \
2973 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2974 } while (0)
2975
2976/* sum[hi:lo] += add */
2977#define ADD_EXTEND_64(s_hi, s_lo, a) \
2978 do { \
2979 s_lo += a; \
2980 s_hi += (s_lo < a) ? 1 : 0; \
2981 } while (0)
2982
bb2a0f7a 2983#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2984 do { \
bb2a0f7a
YG
2985 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2986 pstats->mac_stx[1].s##_lo, \
2987 new->s); \
a2fbb9ea
ET
2988 } while (0)
2989
bb2a0f7a 2990#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
2991 do { \
2992 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2993 old_tclient->s = le32_to_cpu(tclient->s); \
de832a55
EG
2994 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
2995 } while (0)
2996
2997#define UPDATE_EXTEND_USTAT(s, t) \
2998 do { \
2999 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3000 old_uclient->s = uclient->s; \
3001 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3002 } while (0)
3003
3004#define UPDATE_EXTEND_XSTAT(s, t) \
3005 do { \
3006 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3007 old_xclient->s = le32_to_cpu(xclient->s); \
de832a55
EG
3008 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3009 } while (0)
3010
3011/* minuend -= subtrahend */
3012#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3013 do { \
3014 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3015 } while (0)
3016
3017/* minuend[hi:lo] -= subtrahend */
3018#define SUB_EXTEND_64(m_hi, m_lo, s) \
3019 do { \
3020 SUB_64(m_hi, 0, m_lo, s); \
3021 } while (0)
3022
3023#define SUB_EXTEND_USTAT(s, t) \
3024 do { \
3025 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3026 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3027 } while (0)
3028
3029/*
3030 * General service functions
3031 */
3032
3033static inline long bnx2x_hilo(u32 *hiref)
3034{
3035 u32 lo = *(hiref + 1);
3036#if (BITS_PER_LONG == 64)
3037 u32 hi = *hiref;
3038
3039 return HILO_U64(hi, lo);
3040#else
3041 return lo;
3042#endif
3043}
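/* On 32-bit builds this deliberately returns only the low 32 bits,
 * which matches the width of the long counters in struct
 * net_device_stats (an assumption about the intent, inferred from the
 * BITS_PER_LONG split above).
 */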
3044
3045/*
3046 * Init service functions
3047 */
3048
bb2a0f7a
YG
3049static void bnx2x_storm_stats_post(struct bnx2x *bp)
3050{
3051 if (!bp->stats_pending) {
3052 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3053 int i, rc;
bb2a0f7a
YG
3054
3055 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3056 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3057 for_each_queue(bp, i)
3058 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3059
3060 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3061 ((u32 *)&ramrod_data)[1],
3062 ((u32 *)&ramrod_data)[0], 0);
3063 if (rc == 0) {
 3064 /* stats ramrod has its own slot on the spq */
3065 bp->spq_left++;
3066 bp->stats_pending = 1;
3067 }
3068 }
3069}
3070
3071static void bnx2x_stats_init(struct bnx2x *bp)
3072{
3073 int port = BP_PORT(bp);
de832a55 3074 int i;
bb2a0f7a 3075
de832a55 3076 bp->stats_pending = 0;
bb2a0f7a
YG
3077 bp->executer_idx = 0;
3078 bp->stats_counter = 0;
3079
3080 /* port stats */
3081 if (!BP_NOMCP(bp))
3082 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3083 else
3084 bp->port.port_stx = 0;
3085 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3086
3087 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3088 bp->port.old_nig_stats.brb_discard =
3089 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3090 bp->port.old_nig_stats.brb_truncate =
3091 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3092 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3093 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3094 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3095 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3096
3097 /* function stats */
de832a55
EG
3098 for_each_queue(bp, i) {
3099 struct bnx2x_fastpath *fp = &bp->fp[i];
3100
3101 memset(&fp->old_tclient, 0,
3102 sizeof(struct tstorm_per_client_stats));
3103 memset(&fp->old_uclient, 0,
3104 sizeof(struct ustorm_per_client_stats));
3105 memset(&fp->old_xclient, 0,
3106 sizeof(struct xstorm_per_client_stats));
3107 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3108 }
3109
bb2a0f7a 3110 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
bb2a0f7a
YG
3111 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3112
3113 bp->stats_state = STATS_STATE_DISABLED;
3114 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3115 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3116}
3117
3118static void bnx2x_hw_stats_post(struct bnx2x *bp)
3119{
3120 struct dmae_command *dmae = &bp->stats_dmae;
3121 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3122
3123 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3124 if (CHIP_REV_IS_SLOW(bp))
3125 return;
bb2a0f7a
YG
3126
3127 /* loader */
3128 if (bp->executer_idx) {
3129 int loader_idx = PMF_DMAE_C(bp);
3130
3131 memset(dmae, 0, sizeof(struct dmae_command));
3132
3133 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3134 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3135 DMAE_CMD_DST_RESET |
3136#ifdef __BIG_ENDIAN
3137 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3138#else
3139 DMAE_CMD_ENDIANITY_DW_SWAP |
3140#endif
3141 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3142 DMAE_CMD_PORT_0) |
3143 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3144 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3145 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3146 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3147 sizeof(struct dmae_command) *
3148 (loader_idx + 1)) >> 2;
3149 dmae->dst_addr_hi = 0;
3150 dmae->len = sizeof(struct dmae_command) >> 2;
3151 if (CHIP_IS_E1(bp))
3152 dmae->len--;
3153 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3154 dmae->comp_addr_hi = 0;
3155 dmae->comp_val = 1;
3156
3157 *stats_comp = 0;
3158 bnx2x_post_dmae(bp, dmae, loader_idx);
3159
3160 } else if (bp->func_stx) {
3161 *stats_comp = 0;
3162 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3163 }
3164}
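/* Loader pattern (summarizing the code above): when commands are queued
 * (executer_idx != 0) the loader copies the first queued command into
 * DMAE command memory slot loader_idx + 1 and completes to
 * dmae_reg_go_c[loader_idx + 1], which triggers it; the queued commands
 * themselves complete back to dmae_reg_go_c[loader_idx] (see the
 * comp_addr_lo assignments in bnx2x_port_stats_init() below).
 */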
3165
3166static int bnx2x_stats_comp(struct bnx2x *bp)
3167{
3168 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3169 int cnt = 10;
3170
3171 might_sleep();
3172 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3173 if (!cnt) {
 3174 BNX2X_ERR("timeout waiting for stats to finish\n");
3175 break;
3176 }
3177 cnt--;
12469401 3178 msleep(1);
bb2a0f7a
YG
3179 }
3180 return 1;
3181}
3182
3183/*
3184 * Statistics service functions
3185 */
3186
3187static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3188{
3189 struct dmae_command *dmae;
3190 u32 opcode;
3191 int loader_idx = PMF_DMAE_C(bp);
3192 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3193
3194 /* sanity */
3195 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3196 BNX2X_ERR("BUG!\n");
3197 return;
3198 }
3199
3200 bp->executer_idx = 0;
3201
3202 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3203 DMAE_CMD_C_ENABLE |
3204 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3205#ifdef __BIG_ENDIAN
3206 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3207#else
3208 DMAE_CMD_ENDIANITY_DW_SWAP |
3209#endif
3210 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3211 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3212
3213 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3214 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3215 dmae->src_addr_lo = bp->port.port_stx >> 2;
3216 dmae->src_addr_hi = 0;
3217 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3218 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3219 dmae->len = DMAE_LEN32_RD_MAX;
3220 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3221 dmae->comp_addr_hi = 0;
3222 dmae->comp_val = 1;
3223
3224 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3225 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3226 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3227 dmae->src_addr_hi = 0;
7a9b2557
VZ
3228 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3229 DMAE_LEN32_RD_MAX * 4);
3230 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3231 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3232 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3233 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3234 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3235 dmae->comp_val = DMAE_COMP_VAL;
3236
3237 *stats_comp = 0;
3238 bnx2x_hw_stats_post(bp);
3239 bnx2x_stats_comp(bp);
3240}
3241
3242static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3243{
3244 struct dmae_command *dmae;
34f80b04 3245 int port = BP_PORT(bp);
bb2a0f7a 3246 int vn = BP_E1HVN(bp);
a2fbb9ea 3247 u32 opcode;
bb2a0f7a 3248 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3249 u32 mac_addr;
bb2a0f7a
YG
3250 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3251
3252 /* sanity */
3253 if (!bp->link_vars.link_up || !bp->port.pmf) {
3254 BNX2X_ERR("BUG!\n");
3255 return;
3256 }
a2fbb9ea
ET
3257
3258 bp->executer_idx = 0;
bb2a0f7a
YG
3259
3260 /* MCP */
3261 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3262 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3263 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3264#ifdef __BIG_ENDIAN
bb2a0f7a 3265 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3266#else
bb2a0f7a 3267 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3268#endif
bb2a0f7a
YG
3269 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3270 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3271
bb2a0f7a 3272 if (bp->port.port_stx) {
a2fbb9ea
ET
3273
3274 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3275 dmae->opcode = opcode;
bb2a0f7a
YG
3276 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3277 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3278 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3279 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3280 dmae->len = sizeof(struct host_port_stats) >> 2;
3281 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3282 dmae->comp_addr_hi = 0;
3283 dmae->comp_val = 1;
a2fbb9ea
ET
3284 }
3285
bb2a0f7a
YG
3286 if (bp->func_stx) {
3287
3288 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3289 dmae->opcode = opcode;
3290 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3291 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3292 dmae->dst_addr_lo = bp->func_stx >> 2;
3293 dmae->dst_addr_hi = 0;
3294 dmae->len = sizeof(struct host_func_stats) >> 2;
3295 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3296 dmae->comp_addr_hi = 0;
3297 dmae->comp_val = 1;
a2fbb9ea
ET
3298 }
3299
bb2a0f7a 3300 /* MAC */
a2fbb9ea
ET
3301 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3302 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3303 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3304#ifdef __BIG_ENDIAN
3305 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3306#else
3307 DMAE_CMD_ENDIANITY_DW_SWAP |
3308#endif
bb2a0f7a
YG
3309 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3310 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3311
c18487ee 3312 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3313
3314 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3315 NIG_REG_INGRESS_BMAC0_MEM);
3316
3317 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3318 BIGMAC_REGISTER_TX_STAT_GTBYT */
3319 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3320 dmae->opcode = opcode;
3321 dmae->src_addr_lo = (mac_addr +
3322 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3323 dmae->src_addr_hi = 0;
3324 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3325 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3326 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3327 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3328 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3329 dmae->comp_addr_hi = 0;
3330 dmae->comp_val = 1;
3331
3332 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3333 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3334 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3335 dmae->opcode = opcode;
3336 dmae->src_addr_lo = (mac_addr +
3337 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3338 dmae->src_addr_hi = 0;
3339 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3340 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3341 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3342 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3343 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3344 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3345 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3346 dmae->comp_addr_hi = 0;
3347 dmae->comp_val = 1;
3348
c18487ee 3349 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3350
3351 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3352
3353 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3354 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355 dmae->opcode = opcode;
3356 dmae->src_addr_lo = (mac_addr +
3357 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3358 dmae->src_addr_hi = 0;
3359 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3360 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3361 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3362 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363 dmae->comp_addr_hi = 0;
3364 dmae->comp_val = 1;
3365
3366 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3367 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3368 dmae->opcode = opcode;
3369 dmae->src_addr_lo = (mac_addr +
3370 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3371 dmae->src_addr_hi = 0;
3372 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3373 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3374 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3375 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3376 dmae->len = 1;
3377 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3378 dmae->comp_addr_hi = 0;
3379 dmae->comp_val = 1;
3380
3381 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3382 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3383 dmae->opcode = opcode;
3384 dmae->src_addr_lo = (mac_addr +
3385 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3386 dmae->src_addr_hi = 0;
3387 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3388 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3389 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3390 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3391 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3392 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3393 dmae->comp_addr_hi = 0;
3394 dmae->comp_val = 1;
3395 }
3396
3397 /* NIG */
bb2a0f7a
YG
3398 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3399 dmae->opcode = opcode;
3400 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3401 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3402 dmae->src_addr_hi = 0;
3403 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3404 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3405 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3406 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3407 dmae->comp_addr_hi = 0;
3408 dmae->comp_val = 1;
3409
3410 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3411 dmae->opcode = opcode;
3412 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3413 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3414 dmae->src_addr_hi = 0;
3415 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3416 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3417 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3418 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3419 dmae->len = (2*sizeof(u32)) >> 2;
3420 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3421 dmae->comp_addr_hi = 0;
3422 dmae->comp_val = 1;
3423
a2fbb9ea
ET
3424 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3425 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3426 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3427 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3428#ifdef __BIG_ENDIAN
3429 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3430#else
3431 DMAE_CMD_ENDIANITY_DW_SWAP |
3432#endif
3433 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3434 (vn << DMAE_CMD_E1HVN_SHIFT));
3435 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3436 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3437 dmae->src_addr_hi = 0;
3438 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3439 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3440 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3441 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3442 dmae->len = (2*sizeof(u32)) >> 2;
3443 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3444 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3445 dmae->comp_val = DMAE_COMP_VAL;
3446
3447 *stats_comp = 0;
3448}
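/* A note on the DMAE program assembled above: every command but the last
 * completes to a GRC "go" register (dmae_reg_go_c[loader_idx]) with
 * comp_val 1, and that write is what kicks the next command in the chain;
 * only the final command completes to the stats_comp word in host memory
 * with DMAE_COMP_VAL, which is the value bnx2x_stats_comp() polls for to
 * know the whole statistics transfer has finished.
 */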
3449
bb2a0f7a 3450static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3451{
3452 struct dmae_command *dmae = &bp->stats_dmae;
3453 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3454
3455 /* sanity */
3456 if (!bp->func_stx) {
3457 BNX2X_ERR("BUG!\n");
3458 return;
3459 }
a2fbb9ea 3460
3461 bp->executer_idx = 0;
3462 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3463
3464 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3465 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3466 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3467#ifdef __BIG_ENDIAN
3468 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3469#else
3470 DMAE_CMD_ENDIANITY_DW_SWAP |
3471#endif
3472 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3473 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3474 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3475 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3476 dmae->dst_addr_lo = bp->func_stx >> 2;
3477 dmae->dst_addr_hi = 0;
3478 dmae->len = sizeof(struct host_func_stats) >> 2;
3479 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3480 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3481 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3482
3483 *stats_comp = 0;
3484}
a2fbb9ea 3485
3486static void bnx2x_stats_start(struct bnx2x *bp)
3487{
3488 if (bp->port.pmf)
3489 bnx2x_port_stats_init(bp);
3490
3491 else if (bp->func_stx)
3492 bnx2x_func_stats_init(bp);
3493
3494 bnx2x_hw_stats_post(bp);
3495 bnx2x_storm_stats_post(bp);
3496}
3497
3498static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3499{
3500 bnx2x_stats_comp(bp);
3501 bnx2x_stats_pmf_update(bp);
3502 bnx2x_stats_start(bp);
3503}
3504
3505static void bnx2x_stats_restart(struct bnx2x *bp)
3506{
3507 bnx2x_stats_comp(bp);
3508 bnx2x_stats_start(bp);
3509}
3510
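/* A minimal sketch of the UPDATE_STAT64(s, t) helper used in the function
 * below, assuming the bnx2x.h definitions (paraphrased, not verbatim): it
 * takes the 64-bit difference between the freshly DMAE'd BMAC counter 's'
 * and the shadow copy in mac_stx[0].t, refreshes the shadow, and then
 * accumulates the difference into the running total in mac_stx[1].t:
 *
 *	DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi,
 *		diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo);
 *	pstats->mac_stx[0].t##_hi = new->s##_hi;
 *	pstats->mac_stx[0].t##_lo = new->s##_lo;
 *	ADD_64(pstats->mac_stx[1].t##_hi, diff.hi,
 *	       pstats->mac_stx[1].t##_lo, diff.lo);
 */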
3511static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3512{
3513 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3514 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3515 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3516 struct regpair diff;
3517
3518 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3519 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3520 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3521 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3522 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3523 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3524 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3525 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3526 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3527 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3528 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3529 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3530 UPDATE_STAT64(tx_stat_gt127,
3531 tx_stat_etherstatspkts65octetsto127octets);
3532 UPDATE_STAT64(tx_stat_gt255,
3533 tx_stat_etherstatspkts128octetsto255octets);
3534 UPDATE_STAT64(tx_stat_gt511,
3535 tx_stat_etherstatspkts256octetsto511octets);
3536 UPDATE_STAT64(tx_stat_gt1023,
3537 tx_stat_etherstatspkts512octetsto1023octets);
3538 UPDATE_STAT64(tx_stat_gt1518,
3539 tx_stat_etherstatspkts1024octetsto1522octets);
3540 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3541 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3542 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3543 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3544 UPDATE_STAT64(tx_stat_gterr,
3545 tx_stat_dot3statsinternalmactransmiterrors);
3546 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3547
3548 estats->pause_frames_received_hi =
3549 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3550 estats->pause_frames_received_lo =
3551 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3552
3553 estats->pause_frames_sent_hi =
3554 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3555 estats->pause_frames_sent_lo =
3556 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3557}
3558
3559static void bnx2x_emac_stats_update(struct bnx2x *bp)
3560{
3561 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3562 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3563 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3564
3565 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3566 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3567 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3568 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3569 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3570 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3571 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3572 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3573 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3574 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3575 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3576 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3577 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3578 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3579 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3580 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3581 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3582 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3583 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3584 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3585 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3586 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3587 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3588 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3589 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3590 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3591 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3592 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3593 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3594 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3595 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3596
3597 estats->pause_frames_received_hi =
3598 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3599 estats->pause_frames_received_lo =
3600 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3601 ADD_64(estats->pause_frames_received_hi,
3602 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3603 estats->pause_frames_received_lo,
3604 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3605
3606 estats->pause_frames_sent_hi =
3607 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3608 estats->pause_frames_sent_lo =
3609 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3610 ADD_64(estats->pause_frames_sent_hi,
3611 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3612 estats->pause_frames_sent_lo,
3613 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3614}
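/* UPDATE_EXTEND_STAT(s), used above, is the EMAC variant: the EMAC exposes
 * plain 32-bit counters, so each one is extended into the 64-bit hi/lo
 * pair kept in mac_stx[1].  A sketch under the same bnx2x.h assumption
 * (paraphrased, not verbatim):
 *
 *	ADD_EXTEND_64(pstats->mac_stx[1].s##_hi,
 *		      pstats->mac_stx[1].s##_lo,
 *		      new->s);
 */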
3615
3616static int bnx2x_hw_stats_update(struct bnx2x *bp)
3617{
3618 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3619 struct nig_stats *old = &(bp->port.old_nig_stats);
3620 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3621 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3622 struct regpair diff;
de832a55 3623 u32 nig_timer_max;
3624
3625 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3626 bnx2x_bmac_stats_update(bp);
3627
3628 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3629 bnx2x_emac_stats_update(bp);
3630
3631 else { /* unreached */
3632 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3633 return -1;
3634 }
a2fbb9ea 3635
3636 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3637 new->brb_discard - old->brb_discard);
3638 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3639 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3640
3641 UPDATE_STAT64_NIG(egress_mac_pkt0,
3642 etherstatspkts1024octetsto1522octets);
3643 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3644
bb2a0f7a 3645 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3646
3647 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3648 sizeof(struct mac_stx));
3649 estats->brb_drop_hi = pstats->brb_drop_hi;
3650 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3651
bb2a0f7a 3652 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3653
3654 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3655 if (nig_timer_max != estats->nig_timer_max) {
3656 estats->nig_timer_max = nig_timer_max;
3657 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3658 }
3659
bb2a0f7a 3660 return 0;
3661}
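/* ADD_EXTEND_64(s_hi, s_lo, a), relied on above for the BRB counters, adds
 * a 32-bit delta into a split 64-bit counter with a manual carry; a sketch
 * of the idiom (assumed from bnx2x.h, not copied verbatim):
 *
 *	s_lo += a;
 *	if (s_lo < (a))		s_hi++;	   (32-bit wrap means carry)
 */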
3662
bb2a0f7a 3663static int bnx2x_storm_stats_update(struct bnx2x *bp)
3664{
3665 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3666 struct tstorm_per_port_stats *tport =
de832a55 3667 &stats->tstorm_common.port_statistics;
3668 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3669 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3670 int i;
3671
3672 memset(&(fstats->total_bytes_received_hi), 0,
3673 sizeof(struct host_func_stats) - 2*sizeof(u32));
3674 estats->error_bytes_received_hi = 0;
3675 estats->error_bytes_received_lo = 0;
3676 estats->etherstatsoverrsizepkts_hi = 0;
3677 estats->etherstatsoverrsizepkts_lo = 0;
3678 estats->no_buff_discard_hi = 0;
3679 estats->no_buff_discard_lo = 0;
a2fbb9ea 3680
3681 for_each_queue(bp, i) {
3682 struct bnx2x_fastpath *fp = &bp->fp[i];
3683 int cl_id = fp->cl_id;
3684 struct tstorm_per_client_stats *tclient =
3685 &stats->tstorm_common.client_statistics[cl_id];
3686 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3687 struct ustorm_per_client_stats *uclient =
3688 &stats->ustorm_common.client_statistics[cl_id];
3689 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3690 struct xstorm_per_client_stats *xclient =
3691 &stats->xstorm_common.client_statistics[cl_id];
3692 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3693 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3694 u32 diff;
3695
3696 /* are storm stats valid? */
3697 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 3698 bp->stats_counter) {
3699 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3700 " xstorm counter (%d) != stats_counter (%d)\n",
3701 i, xclient->stats_counter, bp->stats_counter);
3702 return -1;
3703 }
3704 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 3705 bp->stats_counter) {
3706 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3707 " tstorm counter (%d) != stats_counter (%d)\n",
3708 i, tclient->stats_counter, bp->stats_counter);
3709 return -2;
3710 }
3711 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3712 bp->stats_counter) {
3713 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3714 " ustorm counter (%d) != stats_counter (%d)\n",
3715 i, uclient->stats_counter, bp->stats_counter);
3716 return -4;
3717 }
a2fbb9ea 3718
3719 qstats->total_bytes_received_hi =
3720 qstats->valid_bytes_received_hi =
a2fbb9ea 3721 le32_to_cpu(tclient->total_rcv_bytes.hi);
3722 qstats->total_bytes_received_lo =
3723 qstats->valid_bytes_received_lo =
a2fbb9ea 3724 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a 3725
de832a55 3726 qstats->error_bytes_received_hi =
bb2a0f7a 3727 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 3728 qstats->error_bytes_received_lo =
bb2a0f7a 3729 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 3730
3731 ADD_64(qstats->total_bytes_received_hi,
3732 qstats->error_bytes_received_hi,
3733 qstats->total_bytes_received_lo,
3734 qstats->error_bytes_received_lo);
3735
3736 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3737 total_unicast_packets_received);
3738 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3739 total_multicast_packets_received);
3740 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3741 total_broadcast_packets_received);
3742 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3743 etherstatsoverrsizepkts);
3744 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3745
3746 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3747 total_unicast_packets_received);
3748 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3749 total_multicast_packets_received);
3750 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3751 total_broadcast_packets_received);
3752 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3753 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3754 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3755
3756 qstats->total_bytes_transmitted_hi =
bb2a0f7a 3757 le32_to_cpu(xclient->total_sent_bytes.hi);
de832a55 3758 qstats->total_bytes_transmitted_lo =
3759 le32_to_cpu(xclient->total_sent_bytes.lo);
3760
3761 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3762 total_unicast_packets_transmitted);
3763 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3764 total_multicast_packets_transmitted);
3765 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3766 total_broadcast_packets_transmitted);
3767
3768 old_tclient->checksum_discard = tclient->checksum_discard;
3769 old_tclient->ttl0_discard = tclient->ttl0_discard;
3770
3771 ADD_64(fstats->total_bytes_received_hi,
3772 qstats->total_bytes_received_hi,
3773 fstats->total_bytes_received_lo,
3774 qstats->total_bytes_received_lo);
3775 ADD_64(fstats->total_bytes_transmitted_hi,
3776 qstats->total_bytes_transmitted_hi,
3777 fstats->total_bytes_transmitted_lo,
3778 qstats->total_bytes_transmitted_lo);
3779 ADD_64(fstats->total_unicast_packets_received_hi,
3780 qstats->total_unicast_packets_received_hi,
3781 fstats->total_unicast_packets_received_lo,
3782 qstats->total_unicast_packets_received_lo);
3783 ADD_64(fstats->total_multicast_packets_received_hi,
3784 qstats->total_multicast_packets_received_hi,
3785 fstats->total_multicast_packets_received_lo,
3786 qstats->total_multicast_packets_received_lo);
3787 ADD_64(fstats->total_broadcast_packets_received_hi,
3788 qstats->total_broadcast_packets_received_hi,
3789 fstats->total_broadcast_packets_received_lo,
3790 qstats->total_broadcast_packets_received_lo);
3791 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3792 qstats->total_unicast_packets_transmitted_hi,
3793 fstats->total_unicast_packets_transmitted_lo,
3794 qstats->total_unicast_packets_transmitted_lo);
3795 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3796 qstats->total_multicast_packets_transmitted_hi,
3797 fstats->total_multicast_packets_transmitted_lo,
3798 qstats->total_multicast_packets_transmitted_lo);
3799 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3800 qstats->total_broadcast_packets_transmitted_hi,
3801 fstats->total_broadcast_packets_transmitted_lo,
3802 qstats->total_broadcast_packets_transmitted_lo);
3803 ADD_64(fstats->valid_bytes_received_hi,
3804 qstats->valid_bytes_received_hi,
3805 fstats->valid_bytes_received_lo,
3806 qstats->valid_bytes_received_lo);
3807
3808 ADD_64(estats->error_bytes_received_hi,
3809 qstats->error_bytes_received_hi,
3810 estats->error_bytes_received_lo,
3811 qstats->error_bytes_received_lo);
3812 ADD_64(estats->etherstatsoverrsizepkts_hi,
3813 qstats->etherstatsoverrsizepkts_hi,
3814 estats->etherstatsoverrsizepkts_lo,
3815 qstats->etherstatsoverrsizepkts_lo);
3816 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3817 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3818 }
3819
3820 ADD_64(fstats->total_bytes_received_hi,
3821 estats->rx_stat_ifhcinbadoctets_hi,
3822 fstats->total_bytes_received_lo,
3823 estats->rx_stat_ifhcinbadoctets_lo);
3824
3825 memcpy(estats, &(fstats->total_bytes_received_hi),
3826 sizeof(struct host_func_stats) - 2*sizeof(u32));
3827
3828 ADD_64(estats->etherstatsoverrsizepkts_hi,
3829 estats->rx_stat_dot3statsframestoolong_hi,
3830 estats->etherstatsoverrsizepkts_lo,
3831 estats->rx_stat_dot3statsframestoolong_lo);
3832 ADD_64(estats->error_bytes_received_hi,
3833 estats->rx_stat_ifhcinbadoctets_hi,
3834 estats->error_bytes_received_lo,
3835 estats->rx_stat_ifhcinbadoctets_lo);
3836
3837 if (bp->port.pmf) {
3838 estats->mac_filter_discard =
3839 le32_to_cpu(tport->mac_filter_discard);
3840 estats->xxoverflow_discard =
3841 le32_to_cpu(tport->xxoverflow_discard);
3842 estats->brb_truncate_discard =
bb2a0f7a 3843 le32_to_cpu(tport->brb_truncate_discard);
3844 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3845 }
3846
3847 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 3848
3849 bp->stats_pending = 0;
3850
3851 return 0;
3852}
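/* The stats_counter handshake above works as follows: each storm bumps its
 * per-client counter once it has finished writing a snapshot, and the
 * driver accepts the snapshot only when (counter + 1) == bp->stats_counter,
 * i.e. when that storm has caught up with the last posted query.  On any
 * mismatch this function returns non-zero, and bnx2x_stats_update() below
 * tolerates that three times before calling bnx2x_panic().
 */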
3853
bb2a0f7a 3854static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3855{
bb2a0f7a 3856 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3857 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 3858 int i;
3859
3860 nstats->rx_packets =
3861 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3862 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3863 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3864
3865 nstats->tx_packets =
3866 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3867 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3868 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3869
de832a55 3870 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 3871
0e39e645 3872 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3873
3874 nstats->rx_dropped = estats->mac_discard;
3875 for_each_queue(bp, i)
3876 nstats->rx_dropped +=
3877 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3878
3879 nstats->tx_dropped = 0;
3880
3881 nstats->multicast =
de832a55 3882 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 3883
bb2a0f7a 3884 nstats->collisions =
de832a55 3885 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3886
3887 nstats->rx_length_errors =
3888 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3889 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3890 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3891 bnx2x_hilo(&estats->brb_truncate_hi);
3892 nstats->rx_crc_errors =
3893 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3894 nstats->rx_frame_errors =
3895 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3896 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3897 nstats->rx_missed_errors = estats->xxoverflow_discard;
3898
3899 nstats->rx_errors = nstats->rx_length_errors +
3900 nstats->rx_over_errors +
3901 nstats->rx_crc_errors +
3902 nstats->rx_frame_errors +
3903 nstats->rx_fifo_errors +
3904 nstats->rx_missed_errors;
a2fbb9ea 3905
bb2a0f7a 3906 nstats->tx_aborted_errors =
3907 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3908 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3909 nstats->tx_carrier_errors =
3910 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3911 nstats->tx_fifo_errors = 0;
3912 nstats->tx_heartbeat_errors = 0;
3913 nstats->tx_window_errors = 0;
3914
3915 nstats->tx_errors = nstats->tx_aborted_errors +
3916 nstats->tx_carrier_errors +
3917 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3918}
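/* bnx2x_hilo() collapses a {hi, lo} u32 counter pair into the unsigned
 * long that the net_device_stats fields expect; a sketch, assuming the
 * bnx2x.h helper (paraphrased):
 *
 *	static inline long bnx2x_hilo(u32 *hiref)
 *	{
 *		u32 lo = *(hiref + 1);
 *	#if (BITS_PER_LONG == 64)
 *		return HILO_U64(*hiref, lo);
 *	#else
 *		return lo;	   (the high word is dropped on 32-bit hosts)
 *	#endif
 *	}
 */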
3919
3920static void bnx2x_drv_stats_update(struct bnx2x *bp)
3921{
3922 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3923 int i;
3924
3925 estats->driver_xoff = 0;
3926 estats->rx_err_discard_pkt = 0;
3927 estats->rx_skb_alloc_failed = 0;
3928 estats->hw_csum_err = 0;
3929 for_each_queue(bp, i) {
3930 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3931
3932 estats->driver_xoff += qstats->driver_xoff;
3933 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3934 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3935 estats->hw_csum_err += qstats->hw_csum_err;
3936 }
3937}
3938
bb2a0f7a 3939static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3940{
bb2a0f7a 3941 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3942
3943 if (*stats_comp != DMAE_COMP_VAL)
3944 return;
3945
3946 if (bp->port.pmf)
de832a55 3947 bnx2x_hw_stats_update(bp);
a2fbb9ea 3948
3949 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3950 BNX2X_ERR("storm stats were not updated for 3 times\n");
3951 bnx2x_panic();
3952 return;
3953 }
3954
3955 bnx2x_net_stats_update(bp);
3956 bnx2x_drv_stats_update(bp);
3957
a2fbb9ea 3958 if (bp->msglevel & NETIF_MSG_TIMER) {
3959 struct tstorm_per_client_stats *old_tclient =
3960 &bp->fp->old_tclient;
3961 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 3962 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3963 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3964 int i;
3965
3966 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3967 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3968 " tx pkt (%lx)\n",
3969 bnx2x_tx_avail(bp->fp),
7a9b2557 3970 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3971 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3972 " rx pkt (%lx)\n",
3973 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3974 bp->fp->rx_comp_cons),
3975 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3976 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
3977 "brb truncate %u\n",
3978 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
3979 qstats->driver_xoff,
3980 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 3981 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 3982 "packets_too_big_discard %lu no_buff_discard %lu "
3983 "mac_discard %u mac_filter_discard %u "
3984 "xxovrflow_discard %u brb_truncate_discard %u "
3985 "ttl0_discard %u\n",
bb2a0f7a 3986 old_tclient->checksum_discard,
3987 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
3988 bnx2x_hilo(&qstats->no_buff_discard_hi),
3989 estats->mac_discard, estats->mac_filter_discard,
3990 estats->xxoverflow_discard, estats->brb_truncate_discard,
bb2a0f7a 3991 old_tclient->ttl0_discard);
3992
3993 for_each_queue(bp, i) {
3994 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3995 bnx2x_fp(bp, i, tx_pkt),
3996 bnx2x_fp(bp, i, rx_pkt),
3997 bnx2x_fp(bp, i, rx_calls));
3998 }
3999 }
4000
4001 bnx2x_hw_stats_post(bp);
4002 bnx2x_storm_stats_post(bp);
4003}
a2fbb9ea 4004
4005static void bnx2x_port_stats_stop(struct bnx2x *bp)
4006{
4007 struct dmae_command *dmae;
4008 u32 opcode;
4009 int loader_idx = PMF_DMAE_C(bp);
4010 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4011
bb2a0f7a 4012 bp->executer_idx = 0;
a2fbb9ea 4013
4014 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4015 DMAE_CMD_C_ENABLE |
4016 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4017#ifdef __BIG_ENDIAN
bb2a0f7a 4018 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4019#else
bb2a0f7a 4020 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4021#endif
4022 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4023 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4024
4025 if (bp->port.port_stx) {
4026
4027 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4028 if (bp->func_stx)
4029 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4030 else
4031 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4032 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4033 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4034 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4035 dmae->dst_addr_hi = 0;
4036 dmae->len = sizeof(struct host_port_stats) >> 2;
4037 if (bp->func_stx) {
4038 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4039 dmae->comp_addr_hi = 0;
4040 dmae->comp_val = 1;
4041 } else {
4042 dmae->comp_addr_lo =
4043 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4044 dmae->comp_addr_hi =
4045 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4046 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4047
4048 *stats_comp = 0;
4049 }
4050 }
4051
4052 if (bp->func_stx) {
4053
4054 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4055 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4056 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4057 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4058 dmae->dst_addr_lo = bp->func_stx >> 2;
4059 dmae->dst_addr_hi = 0;
4060 dmae->len = sizeof(struct host_func_stats) >> 2;
4061 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4062 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4063 dmae->comp_val = DMAE_COMP_VAL;
4064
4065 *stats_comp = 0;
a2fbb9ea 4066 }
4067}
4068
4069static void bnx2x_stats_stop(struct bnx2x *bp)
4070{
4071 int update = 0;
4072
4073 bnx2x_stats_comp(bp);
4074
4075 if (bp->port.pmf)
4076 update = (bnx2x_hw_stats_update(bp) == 0);
4077
4078 update |= (bnx2x_storm_stats_update(bp) == 0);
4079
4080 if (update) {
4081 bnx2x_net_stats_update(bp);
a2fbb9ea 4082
4083 if (bp->port.pmf)
4084 bnx2x_port_stats_stop(bp);
4085
4086 bnx2x_hw_stats_post(bp);
4087 bnx2x_stats_comp(bp);
4088 }
4089}
4090
4091static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4092{
4093}
4094
4095static const struct {
4096 void (*action)(struct bnx2x *bp);
4097 enum bnx2x_stats_state next_state;
4098} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4099/* state event */
4100{
4101/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4102/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4103/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4104/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4105},
4106{
4107/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4108/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4109/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4110/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4111}
4112};
4113
4114static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4115{
4116 enum bnx2x_stats_state state = bp->stats_state;
4117
4118 bnx2x_stats_stm[state][event].action(bp);
4119 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4120
4121 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4122 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4123 state, event, bp->stats_state);
4124}
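/* The 2x4 table above decouples callers from the statistics life cycle:
 * the periodic timer, for instance, simply issues
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 *
 * which runs bnx2x_stats_update() in STATS_STATE_ENABLED and is a no-op
 * in STATS_STATE_DISABLED, with no state checks at the call site.
 */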
4125
4126static void bnx2x_timer(unsigned long data)
4127{
4128 struct bnx2x *bp = (struct bnx2x *) data;
4129
4130 if (!netif_running(bp->dev))
4131 return;
4132
4133 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4134 goto timer_restart;
4135
4136 if (poll) {
4137 struct bnx2x_fastpath *fp = &bp->fp[0];
4138 int rc;
4139
4140 bnx2x_tx_int(fp, 1000);
4141 rc = bnx2x_rx_int(fp, 1000);
4142 }
4143
4144 if (!BP_NOMCP(bp)) {
4145 int func = BP_FUNC(bp);
4146 u32 drv_pulse;
4147 u32 mcp_pulse;
4148
4149 ++bp->fw_drv_pulse_wr_seq;
4150 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4151 /* TBD - add SYSTEM_TIME */
4152 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4153 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4154
34f80b04 4155 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4156 MCP_PULSE_SEQ_MASK);
4157 /* The delta between driver pulse and mcp response
4158 * should be 1 (before mcp response) or 0 (after mcp response)
4159 */
4160 if ((drv_pulse != mcp_pulse) &&
4161 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4162 /* someone lost a heartbeat... */
4163 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4164 drv_pulse, mcp_pulse);
4165 }
4166 }
4167
4168 if ((bp->state == BNX2X_STATE_OPEN) ||
4169 (bp->state == BNX2X_STATE_DISABLED))
4170 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4171
f1410647 4172timer_restart:
4173 mod_timer(&bp->timer, jiffies + bp->current_interval);
4174}
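/* Heartbeat arithmetic, worked through: drv_pulse == mcp_pulse means the
 * MCP has already answered the latest pulse, and drv_pulse == mcp_pulse + 1
 * (modulo MCP_PULSE_SEQ_MASK) means the answer is still in flight; anything
 * else -- say drv_pulse 0x0012 against mcp_pulse 0x000f -- is a missed
 * heartbeat and triggers the BNX2X_ERR() above.
 */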
4175
4176/* end of Statistics */
4177
4178/* nic init */
4179
4180/*
4181 * nic init service functions
4182 */
4183
34f80b04 4184static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4185{
4186 int port = BP_PORT(bp);
4187
4188 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4189 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4190 sizeof(struct ustorm_status_block)/4);
4191 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4192 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4193 sizeof(struct cstorm_status_block)/4);
4194}
4195
4196static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4197 dma_addr_t mapping, int sb_id)
4198{
4199 int port = BP_PORT(bp);
bb2a0f7a 4200 int func = BP_FUNC(bp);
a2fbb9ea 4201 int index;
34f80b04 4202 u64 section;
4203
4204 /* USTORM */
4205 section = ((u64)mapping) + offsetof(struct host_status_block,
4206 u_status_block);
34f80b04 4207 sb->u_status_block.status_block_id = sb_id;
4208
4209 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4210 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4211 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4212 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4213 U64_HI(section));
4214 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4215 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4216
4217 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4218 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4219 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4220
4221 /* CSTORM */
4222 section = ((u64)mapping) + offsetof(struct host_status_block,
4223 c_status_block);
34f80b04 4224 sb->c_status_block.status_block_id = sb_id;
4225
4226 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4227 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4228 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4229 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4230 U64_HI(section));
4231 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4232 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4233
4234 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4235 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4236 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4237
4238 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4239}
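/* Writing 1 into every SB_HC_DISABLE slot above parks all host-coalescing
 * indices of the freshly initialized status block in the disabled state;
 * bnx2x_update_coalesce() below then re-enables just the Rx/Tx CQ indices
 * that have a non-zero tick value configured.
 */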
4240
4241static void bnx2x_zero_def_sb(struct bnx2x *bp)
4242{
4243 int func = BP_FUNC(bp);
a2fbb9ea 4244
4245 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4246 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4247 sizeof(struct ustorm_def_status_block)/4);
4248 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4249 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4250 sizeof(struct cstorm_def_status_block)/4);
4251 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4252 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4253 sizeof(struct xstorm_def_status_block)/4);
4254 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4255 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4256 sizeof(struct tstorm_def_status_block)/4);
4257}
4258
4259static void bnx2x_init_def_sb(struct bnx2x *bp,
4260 struct host_def_status_block *def_sb,
34f80b04 4261 dma_addr_t mapping, int sb_id)
a2fbb9ea 4262{
4263 int port = BP_PORT(bp);
4264 int func = BP_FUNC(bp);
4265 int index, val, reg_offset;
4266 u64 section;
4267
4268 /* ATTN */
4269 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4270 atten_status_block);
34f80b04 4271 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4272
4273 bp->attn_state = 0;
4274
4275 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4276 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4277
ET
4279 bp->attn_group[index].sig[0] = REG_RD(bp,
4280 reg_offset + 0x10*index);
4281 bp->attn_group[index].sig[1] = REG_RD(bp,
4282 reg_offset + 0x4 + 0x10*index);
4283 bp->attn_group[index].sig[2] = REG_RD(bp,
4284 reg_offset + 0x8 + 0x10*index);
4285 bp->attn_group[index].sig[3] = REG_RD(bp,
4286 reg_offset + 0xc + 0x10*index);
4287 }
4288
4289 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4290 HC_REG_ATTN_MSG0_ADDR_L);
4291
4292 REG_WR(bp, reg_offset, U64_LO(section));
4293 REG_WR(bp, reg_offset + 4, U64_HI(section));
4294
4295 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4296
4297 val = REG_RD(bp, reg_offset);
34f80b04 4298 val |= sb_id;
4299 REG_WR(bp, reg_offset, val);
4300
4301 /* USTORM */
4302 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4303 u_def_status_block);
34f80b04 4304 def_sb->u_def_status_block.status_block_id = sb_id;
4305
4306 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4307 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4308 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4309 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4310 U64_HI(section));
5c862848 4311 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4312 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4313
4314 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4315 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4316 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4317
4318 /* CSTORM */
4319 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4320 c_def_status_block);
34f80b04 4321 def_sb->c_def_status_block.status_block_id = sb_id;
4322
4323 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4324 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4325 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4326 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4327 U64_HI(section));
5c862848 4328 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4329 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4330
4331 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4332 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4333 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4334
4335 /* TSTORM */
4336 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4337 t_def_status_block);
34f80b04 4338 def_sb->t_def_status_block.status_block_id = sb_id;
4339
4340 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4341 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4342 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4343 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4344 U64_HI(section));
5c862848 4345 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4346 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4347
4348 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4349 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4350 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4351
4352 /* XSTORM */
4353 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4354 x_def_status_block);
34f80b04 4355 def_sb->x_def_status_block.status_block_id = sb_id;
4356
4357 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4358 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4359 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4360 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4361 U64_HI(section));
5c862848 4362 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4363 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4364
4365 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4366 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4367 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4368
bb2a0f7a 4369 bp->stats_pending = 0;
66e855f3 4370 bp->set_mac_pending = 0;
bb2a0f7a 4371
34f80b04 4372 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4373}
4374
4375static void bnx2x_update_coalesce(struct bnx2x *bp)
4376{
34f80b04 4377 int port = BP_PORT(bp);
4378 int i;
4379
4380 for_each_queue(bp, i) {
34f80b04 4381 int sb_id = bp->fp[i].sb_id;
4382
4383 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4384 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4385 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4386 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4387 bp->rx_ticks/12);
a2fbb9ea 4388 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4389 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4390 U_SB_ETH_RX_CQ_INDEX),
4391 bp->rx_ticks ? 0 : 1);
4392
4393 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4394 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4395 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4396 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4397 bp->tx_ticks/12);
a2fbb9ea 4398 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4399 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4400 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4401 bp->tx_ticks ? 0 : 1);
4402 }
4403}
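/* The tick values are programmed as bp->rx_ticks/12 and bp->tx_ticks/12,
 * which suggests the HC timeout field counts in 12-usec hardware units
 * (an inference from the scaling here, not from documentation); a tick
 * value of 0 leaves the corresponding index disabled instead.
 */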
4404
4405static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4406 struct bnx2x_fastpath *fp, int last)
4407{
4408 int i;
4409
4410 for (i = 0; i < last; i++) {
4411 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4412 struct sk_buff *skb = rx_buf->skb;
4413
4414 if (skb == NULL) {
4415 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4416 continue;
4417 }
4418
4419 if (fp->tpa_state[i] == BNX2X_TPA_START)
4420 pci_unmap_single(bp->pdev,
4421 pci_unmap_addr(rx_buf, mapping),
437cf2f1 4422 bp->rx_buf_size,
4423 PCI_DMA_FROMDEVICE);
4424
4425 dev_kfree_skb(skb);
4426 rx_buf->skb = NULL;
4427 }
4428}
4429
4430static void bnx2x_init_rx_rings(struct bnx2x *bp)
4431{
7a9b2557 4432 int func = BP_FUNC(bp);
4433 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4434 ETH_MAX_AGGREGATION_QUEUES_E1H;
4435 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4436 int i, j;
a2fbb9ea 4437
87942b46 4438 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4439 DP(NETIF_MSG_IFUP,
4440 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4441
7a9b2557 4442 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4443
555f6c78 4444 for_each_rx_queue(bp, j) {
32626230 4445 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4446
32626230 4447 for (i = 0; i < max_agg_queues; i++) {
4448 fp->tpa_pool[i].skb =
4449 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4450 if (!fp->tpa_pool[i].skb) {
4451 BNX2X_ERR("Failed to allocate TPA "
4452 "skb pool for queue[%d] - "
4453 "disabling TPA on this "
4454 "queue!\n", j);
4455 bnx2x_free_tpa_pool(bp, fp, i);
4456 fp->disable_tpa = 1;
4457 break;
4458 }
4459 pci_unmap_addr_set((struct sw_rx_bd *)
4460 &bp->fp->tpa_pool[i],
4461 mapping, 0);
4462 fp->tpa_state[i] = BNX2X_TPA_STOP;
4463 }
4464 }
4465 }
4466
555f6c78 4467 for_each_rx_queue(bp, j) {
4468 struct bnx2x_fastpath *fp = &bp->fp[j];
4469
4470 fp->rx_bd_cons = 0;
4471 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4472 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4473
4474 /* "next page" elements initialization */
4475 /* SGE ring */
4476 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4477 struct eth_rx_sge *sge;
4478
4479 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4480 sge->addr_hi =
4481 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4482 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4483 sge->addr_lo =
4484 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4485 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4486 }
4487
4488 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4489
7a9b2557 4490 /* RX BD ring */
4491 for (i = 1; i <= NUM_RX_RINGS; i++) {
4492 struct eth_rx_bd *rx_bd;
4493
4494 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4495 rx_bd->addr_hi =
4496 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4497 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4498 rx_bd->addr_lo =
4499 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4500 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4501 }
4502
34f80b04 4503 /* CQ ring */
4504 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4505 struct eth_rx_cqe_next_page *nextpg;
4506
4507 nextpg = (struct eth_rx_cqe_next_page *)
4508 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4509 nextpg->addr_hi =
4510 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4511 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4512 nextpg->addr_lo =
4513 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4514 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4515 }
4516
4517 /* Allocate SGEs and initialize the ring elements */
4518 for (i = 0, ring_prod = 0;
4519 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4520
4521 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4522 BNX2X_ERR("was only able to allocate "
4523 "%d rx sges\n", i);
4524 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4525 /* Cleanup already allocated elements */
4526 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4527 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4528 fp->disable_tpa = 1;
4529 ring_prod = 0;
4530 break;
4531 }
4532 ring_prod = NEXT_SGE_IDX(ring_prod);
4533 }
4534 fp->rx_sge_prod = ring_prod;
4535
4536 /* Allocate BDs and initialize BD ring */
66e855f3 4537 fp->rx_comp_cons = 0;
7a9b2557 4538 cqe_ring_prod = ring_prod = 0;
4539 for (i = 0; i < bp->rx_ring_size; i++) {
4540 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4541 BNX2X_ERR("was only able to allocate "
4542 "%d rx skbs on queue[%d]\n", i, j);
4543 fp->eth_q_stats.rx_skb_alloc_failed++;
4544 break;
4545 }
4546 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4547 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4548 WARN_ON(ring_prod <= i);
4549 }
4550
4551 fp->rx_bd_prod = ring_prod;
4552 /* must not have more available CQEs than BDs */
4553 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4554 cqe_ring_prod);
4555 fp->rx_pkt = fp->rx_calls = 0;
4556
4557 /* Warning!
4558 * this will generate an interrupt (to the TSTORM)
4559 * must only be done after chip is initialized
4560 */
4561 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4562 fp->rx_sge_prod);
4563 if (j != 0)
4564 continue;
4565
4566 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4567 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4568 U64_LO(fp->rx_comp_mapping));
4569 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4570 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4571 U64_HI(fp->rx_comp_mapping));
4572 }
4573}
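/* Ring geometry used above: the last two slots of each RX BD page and each
 * SGE page (hence the "* i - 2" indexing) and the last slot of each RCQ
 * page ("* i - 1") hold next-page pointers rather than real descriptors,
 * so every page chains to its successor and the final page wraps back to
 * the first through the "% NUM_*_RINGS" arithmetic.  The TX ring below
 * plays the same trick with one reserved slot per page.
 */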
4574
4575static void bnx2x_init_tx_ring(struct bnx2x *bp)
4576{
4577 int i, j;
4578
555f6c78 4579 for_each_tx_queue(bp, j) {
4580 struct bnx2x_fastpath *fp = &bp->fp[j];
4581
4582 for (i = 1; i <= NUM_TX_RINGS; i++) {
4583 struct eth_tx_bd *tx_bd =
4584 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4585
4586 tx_bd->addr_hi =
4587 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4588 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4589 tx_bd->addr_lo =
4590 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4591 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4592 }
4593
4594 fp->tx_pkt_prod = 0;
4595 fp->tx_pkt_cons = 0;
4596 fp->tx_bd_prod = 0;
4597 fp->tx_bd_cons = 0;
4598 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4599 fp->tx_pkt = 0;
4600 }
4601}
4602
4603static void bnx2x_init_sp_ring(struct bnx2x *bp)
4604{
34f80b04 4605 int func = BP_FUNC(bp);
4606
4607 spin_lock_init(&bp->spq_lock);
4608
4609 bp->spq_left = MAX_SPQ_PENDING;
4610 bp->spq_prod_idx = 0;
4611 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4612 bp->spq_prod_bd = bp->spq;
4613 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4614
34f80b04 4615 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4616 U64_LO(bp->spq_mapping));
4617 REG_WR(bp,
4618 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4619 U64_HI(bp->spq_mapping));
4620
34f80b04 4621 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4622 bp->spq_prod_idx);
4623}
4624
4625static void bnx2x_init_context(struct bnx2x *bp)
4626{
4627 int i;
4628
4629 for_each_queue(bp, i) {
4630 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4631 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 4632 u8 cl_id = fp->cl_id;
34f80b04 4633 u8 sb_id = FP_SB_ID(fp);
a2fbb9ea 4634
4635 context->ustorm_st_context.common.sb_index_numbers =
4636 BNX2X_RX_SB_INDEX_NUM;
4637 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4638 context->ustorm_st_context.common.status_block_id = sb_id;
4639 context->ustorm_st_context.common.flags =
4640 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4641 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4642 context->ustorm_st_context.common.statistics_counter_id =
4643 cl_id;
8d9c5f34 4644 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 4645 BNX2X_RX_ALIGN_SHIFT;
34f80b04 4646 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4647 bp->rx_buf_size;
34f80b04 4648 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4649 U64_HI(fp->rx_desc_mapping);
34f80b04 4650 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4651 U64_LO(fp->rx_desc_mapping);
4652 if (!fp->disable_tpa) {
4653 context->ustorm_st_context.common.flags |=
4654 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4655 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4656 context->ustorm_st_context.common.sge_buff_size =
4657 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4658 (u32)0xffff);
4659 context->ustorm_st_context.common.sge_page_base_hi =
4660 U64_HI(fp->rx_sge_mapping);
4661 context->ustorm_st_context.common.sge_page_base_lo =
4662 U64_LO(fp->rx_sge_mapping);
4663 }
4664
4665 context->ustorm_ag_context.cdu_usage =
4666 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4667 CDU_REGION_NUMBER_UCM_AG,
4668 ETH_CONNECTION_TYPE);
4669
4670 context->xstorm_st_context.tx_bd_page_base_hi =
4671 U64_HI(fp->tx_desc_mapping);
4672 context->xstorm_st_context.tx_bd_page_base_lo =
4673 U64_LO(fp->tx_desc_mapping);
4674 context->xstorm_st_context.db_data_addr_hi =
4675 U64_HI(fp->tx_prods_mapping);
4676 context->xstorm_st_context.db_data_addr_lo =
4677 U64_LO(fp->tx_prods_mapping);
4678 context->xstorm_st_context.statistics_data = (fp->cl_id |
4679 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea 4680 context->cstorm_st_context.sb_index_number =
5c862848 4681 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4682 context->cstorm_st_context.status_block_id = sb_id;
4683
4684 context->xstorm_ag_context.cdu_reserved =
4685 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4686 CDU_REGION_NUMBER_XCM_AG,
4687 ETH_CONNECTION_TYPE);
4688 }
4689}
4690
4691static void bnx2x_init_ind_table(struct bnx2x *bp)
4692{
26c8fa4d 4693 int func = BP_FUNC(bp);
4694 int i;
4695
555f6c78 4696 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4697 return;
4698
4699 DP(NETIF_MSG_IFUP,
4700 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4701 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4702 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4703 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
555f6c78 4704 BP_CL_ID(bp) + (i % bp->num_rx_queues));
4705}
4706
4707static void bnx2x_set_client_config(struct bnx2x *bp)
4708{
49d66772 4709 struct tstorm_eth_client_config tstorm_client = {0};
4710 int port = BP_PORT(bp);
4711 int i;
49d66772 4712
e7799c5f 4713 tstorm_client.mtu = bp->dev->mtu;
49d66772 4714 tstorm_client.config_flags =
4715 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4716 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 4717#ifdef BCM_VLAN
0c6671b0 4718 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 4719 tstorm_client.config_flags |=
8d9c5f34 4720 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4721 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4722 }
4723#endif
49d66772 4724
4725 if (bp->flags & TPA_ENABLE_FLAG) {
4726 tstorm_client.max_sges_for_packet =
4f40f2cb 4727 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4728 tstorm_client.max_sges_for_packet =
4729 ((tstorm_client.max_sges_for_packet +
4730 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4731 PAGES_PER_SGE_SHIFT;
4732
4733 tstorm_client.config_flags |=
4734 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4735 }
4736
49d66772 4737 for_each_queue(bp, i) {
4738 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4739
49d66772 4740 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4741 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4742 ((u32 *)&tstorm_client)[0]);
4743 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4744 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4745 ((u32 *)&tstorm_client)[1]);
4746 }
4747
4748 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4749 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4750}
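/* A worked example of the max_sges_for_packet math above, under assumed
 * values SGE_PAGE_SIZE = 4096, SGE_PAGE_SHIFT = 12, PAGES_PER_SGE = 2 and
 * PAGES_PER_SGE_SHIFT = 1, with an MTU of 9000: SGE_PAGE_ALIGN(9000) is
 * 12288, shifting by 12 gives 3 pages, rounding 3 up to a multiple of 2
 * gives 4, and shifting by 1 yields 2 SGEs per aggregated packet.
 */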
4751
4752static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4753{
a2fbb9ea 4754 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4755 int mode = bp->rx_mode;
4756 int mask = (1 << BP_L_ID(bp));
4757 int func = BP_FUNC(bp);
4758 int i;
4759
3196a88a 4760 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4761
4762 switch (mode) {
4763 case BNX2X_RX_MODE_NONE: /* no Rx */
4764 tstorm_mac_filter.ucast_drop_all = mask;
4765 tstorm_mac_filter.mcast_drop_all = mask;
4766 tstorm_mac_filter.bcast_drop_all = mask;
4767 break;
4768 case BNX2X_RX_MODE_NORMAL:
34f80b04 4769 tstorm_mac_filter.bcast_accept_all = mask;
4770 break;
4771 case BNX2X_RX_MODE_ALLMULTI:
4772 tstorm_mac_filter.mcast_accept_all = mask;
4773 tstorm_mac_filter.bcast_accept_all = mask;
4774 break;
4775 case BNX2X_RX_MODE_PROMISC:
4776 tstorm_mac_filter.ucast_accept_all = mask;
4777 tstorm_mac_filter.mcast_accept_all = mask;
4778 tstorm_mac_filter.bcast_accept_all = mask;
4779 break;
4780 default:
4781 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4782 break;
4783 }
4784
4785 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4786 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4787 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4788 ((u32 *)&tstorm_mac_filter)[i]);
4789
34f80b04 4790/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4791 ((u32 *)&tstorm_mac_filter)[i]); */
4792 }
a2fbb9ea 4793
4794 if (mode != BNX2X_RX_MODE_NONE)
4795 bnx2x_set_client_config(bp);
4796}
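/* The filtering matrix above, applied per this function's L_ID mask bit:
 * NONE drops unicast, multicast and broadcast alike; NORMAL accepts
 * broadcast while unicast/multicast stay subject to the MAC and MC hash
 * filters; ALLMULTI additionally accepts all multicast; PROMISC accepts
 * everything.
 */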
4797
4798static void bnx2x_init_internal_common(struct bnx2x *bp)
4799{
4800 int i;
4801
4802 if (bp->flags & TPA_ENABLE_FLAG) {
4803 struct tstorm_eth_tpa_exist tpa = {0};
4804
4805 tpa.tpa_exist = 1;
4806
4807 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4808 ((u32 *)&tpa)[0]);
4809 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4810 ((u32 *)&tpa)[1]);
4811 }
4812
4813 /* Zero this manually as its initialization is
4814 currently missing in the initTool */
4815 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4816 REG_WR(bp, BAR_USTRORM_INTMEM +
4817 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4818}
4819
4820static void bnx2x_init_internal_port(struct bnx2x *bp)
4821{
4822 int port = BP_PORT(bp);
4823
4824 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4825 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4826 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4827 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4828}
4829
4830/* Calculates the sum of vn_min_rates.
4831 It's needed for further normalizing of the min_rates.
4832 Returns:
4833 sum of vn_min_rates.
4834 or
4835 0 - if all the min_rates are 0.
4836 In the latter case the fairness algorithm should be deactivated.
4837 If not all min_rates are zero then those that are zeroes will be set to 1.
4838 */
4839static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4840{
4841 int all_zero = 1;
4842 int port = BP_PORT(bp);
4843 int vn;
4844
4845 bp->vn_weight_sum = 0;
4846 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4847 int func = 2*vn + port;
4848 u32 vn_cfg =
4849 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4850 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4851 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4852
4853 /* Skip hidden vns */
4854 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4855 continue;
4856
4857 /* If min rate is zero - set it to 1 */
4858 if (!vn_min_rate)
4859 vn_min_rate = DEF_MIN_RATE;
4860 else
4861 all_zero = 0;
4862
4863 bp->vn_weight_sum += vn_min_rate;
4864 }
4865
4866 /* ... only if all min rates are zeros - disable fairness */
4867 if (all_zero)
4868 bp->vn_weight_sum = 0;
4869}
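/* Worked example: with two visible VNs configured for 25% and 75% minimum
 * bandwidth, vn_min_rate evaluates to 2500 and 7500 and vn_weight_sum to
 * 10000; if every visible VN is configured for 0%, each is bumped to
 * DEF_MIN_RATE during the scan, but vn_weight_sum is forced back to 0 at
 * the end, which is the "fairness disabled" signal to the callers.
 */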
4870
471de716 4871static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4872{
4873 struct tstorm_eth_function_common_config tstorm_config = {0};
4874 struct stats_indication_flags stats_flags = {0};
4875 int port = BP_PORT(bp);
4876 int func = BP_FUNC(bp);
4877 int i, j;
4878 u32 offset;
471de716 4879 u16 max_agg_size;
4880
4881 if (is_multi(bp)) {
555f6c78 4882 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4883 tstorm_config.rss_result_mask = MULTI_MASK;
4884 }
4885 if (IS_E1HMF(bp))
4886 tstorm_config.config_flags |=
4887 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4888
4889 tstorm_config.leading_client_id = BP_L_ID(bp);
4890
a2fbb9ea 4891 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4892 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4893 (*(u32 *)&tstorm_config));
4894
c14423fe 4895 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4896 bnx2x_set_storm_rx_mode(bp);
4897
4898 for_each_queue(bp, i) {
4899 u8 cl_id = bp->fp[i].cl_id;
4900
4901 /* reset xstorm per client statistics */
4902 offset = BAR_XSTRORM_INTMEM +
4903 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4904 for (j = 0;
4905 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4906 REG_WR(bp, offset + j*4, 0);
4907
4908 /* reset tstorm per client statistics */
4909 offset = BAR_TSTRORM_INTMEM +
4910 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4911 for (j = 0;
4912 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4913 REG_WR(bp, offset + j*4, 0);
4914
4915 /* reset ustorm per client statistics */
4916 offset = BAR_USTRORM_INTMEM +
4917 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4918 for (j = 0;
4919 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4920 REG_WR(bp, offset + j*4, 0);
4921 }
4922
4923 /* Init statistics related context */
34f80b04 4924 stats_flags.collect_eth = 1;
a2fbb9ea 4925
66e855f3 4926 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4927 ((u32 *)&stats_flags)[0]);
66e855f3 4928 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4929 ((u32 *)&stats_flags)[1]);
4930
66e855f3 4931 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4932 ((u32 *)&stats_flags)[0]);
66e855f3 4933 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4934 ((u32 *)&stats_flags)[1]);
4935
4936 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4937 ((u32 *)&stats_flags)[0]);
4938 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4939 ((u32 *)&stats_flags)[1]);
4940
66e855f3 4941 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4942 ((u32 *)&stats_flags)[0]);
66e855f3 4943 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4944 ((u32 *)&stats_flags)[1]);
4945
66e855f3
YG
4946 REG_WR(bp, BAR_XSTRORM_INTMEM +
4947 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4948 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4949 REG_WR(bp, BAR_XSTRORM_INTMEM +
4950 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4951 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4952
4953 REG_WR(bp, BAR_TSTRORM_INTMEM +
4954 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4955 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4956 REG_WR(bp, BAR_TSTRORM_INTMEM +
4957 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4958 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 4959
de832a55
EG
4960 REG_WR(bp, BAR_USTRORM_INTMEM +
4961 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4962 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4963 REG_WR(bp, BAR_USTRORM_INTMEM +
4964 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4965 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4966
34f80b04
EG
4967 if (CHIP_IS_E1H(bp)) {
4968 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4969 IS_E1HMF(bp));
4970 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4971 IS_E1HMF(bp));
4972 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4973 IS_E1HMF(bp));
4974 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4975 IS_E1HMF(bp));
4976
7a9b2557
VZ
4977 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4978 bp->e1hov);
34f80b04
EG
4979 }

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
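
	/*
	 * Illustrative arithmetic for max_agg_size: assuming 4KB pages
	 * (SGE_PAGE_SIZE == 4096) and PAGES_PER_SGE == 2, the 8-fragment
	 * FW limit gives 8 * 4096 * 2 = 64KB, so the min() against 0xffff
	 * effectively caps the per-client aggregation size at 65535 bytes.
	 */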

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}
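
	/*
	 * Note on the thresholds above (a plausible reading, not spelled
	 * out in this file): each low/high pair looks like a hysteresis
	 * band on remaining ring entries - pause is raised around one
	 * bound and released around the other.  The SGE thresholds only
	 * matter when TPA is in use, which is why they stay 0 for queues
	 * with TPA disabled.
	 */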

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables =
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (bp->vn_weight_sum)
			bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		else
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
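
/*
 * Note: the missing breaks above are deliberate.  A COMMON load (first
 * function on the chip) falls through to also run the PORT and FUNCTION
 * init, a PORT load (first function on its port) runs PORT + FUNCTION,
 * and any other function runs only the FUNCTION init.
 */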

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      FP_SB_ID(fp));
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME	0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));
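
	/*
	 * Layout recap (per RFC 1952): bytes 0-1 are the gzip magic
	 * 0x1f 0x8b, byte 2 is the compression method (8 = deflate) and
	 * byte 3 holds the flags.  The fixed header is 10 bytes, so n
	 * starts at 10 and, when FNAME (bit 3) is set, additionally skips
	 * the NUL-terminated original file name that follows it.
	 */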

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
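
/*
 * For reference: each 3-word write above feeds 8 data bytes plus control
 * flags into the NIG debug-packet interface, so the two writes emit one
 * 16-byte looped-back frame (0x20 marks start-of-packet, 0x10
 * end-of-packet) - matching the 0x10-byte packet size the memory test
 * below waits for.
 */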

/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		bp->port.need_hw_lock = 1;
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
			MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */
	bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
			     (port ? XCM_PORT1_END : XCM_PORT0_END));

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			     func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */

	bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
			     (port ? BRB1_PORT1_END : BRB1_PORT0_END));
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
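
	/*
	 * Example for the jumbo-MTU branch above (illustrative): with
	 * mtu = 9000, (24*1024 + 9000*4)/256 rounds up to
	 * low = 96 + 9000/64 + 1 = 237 entries, and high = low + 56
	 * adds the fixed 14KB (14*1024/256) of headroom.
	 */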

	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			     port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			     port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			     port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			     port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			     port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			     port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			     port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			     port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* support pause requests from USDM, TSDM and BRB */
		REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits, a valid bit (1) is added
   as the 53rd bit, and since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
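
/*
 * Example (illustrative): a DMA address of 0x123456000 yields
 * ONCHIP_ADDR1() == 0x00123456 (address >> 12) and
 * ONCHIP_ADDR2() == 0x00100000 (only the valid bit, since
 * address >> 44 is 0 here); bnx2x_ilt_wr() below writes the pair
 * as one wide ILT entry.
 */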

#define CNIC_ILT_LINES		0

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	} else
		bp->func_stx = 0;

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
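
/*
 * Timing note: with the default 10ms delay the loop above polls for up
 * to 200 * 10ms = 2s; on emulation/FPGA (CHIP_REV_IS_SLOW) the 100ms
 * delay stretches the same 200 iterations to 20s.
 */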

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
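
/*
 * Design note: on any allocation failure the macros above jump to
 * alloc_mem_err, which simply calls bnx2x_free_mem(); that is safe for
 * a partially built state because BNX2X_PCI_FREE()/BNX2X_FREE() both
 * NULL-check before freeing and NULL the pointer afterwards.
 */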

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	if (is_multi(bp))
		printk(KERN_INFO PFX
		       "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset].vector,
		       bp->msix_table[offset + i - 1].vector);
	else
		printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (netif_running(bp->dev)) {
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0  32-63:port1
	 * multicast 64-127:port0  128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
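
/*
 * Packing example (illustrative, little-endian host): for the MAC
 * 00:0e:0c:11:22:33, *(u16 *)&dev_addr[0] reads 0x0e00 and swab16()
 * turns it into 0x000e, so the CAM entry holds msb/middle/lsb =
 * 0x000e/0x0c11/0x2233 in the byte order the firmware expects.
 */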

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 500;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state)
			return 0;

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
6702
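/* Bring up the leading connection: completion of the PORT_SETUP ramrod is
 * tracked through the global bp->state (set to BNX2X_STATE_OPEN by the
 * slowpath event handler).
 */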
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

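/* Bring up one non-default queue: unlike the leading connection above,
 * completion is tracked per fastpath via fp->state rather than through
 * the global bp->state.
 */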
static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   " number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fall back to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X; set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}

static void bnx2x_set_rx_mode(struct net_device *dev);

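/* NIC load sequence, as implemented below: pick the interrupt mode and
 * queue count, allocate memory and NAPI contexts, request IRQs, negotiate
 * the LOAD type with the MCP (or emulate it via load_count[] when no MCP
 * is present), init the HW and NIC internals, open the leading and
 * non-default connections, program the MAC and RX filters and finally
 * start the periodic timer.  Failure paths unwind through the
 * load_error1/2/3 labels in reverse order of initialization.
 */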
/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to the MCP.
	   The MCP replies with the type of LOAD command to perform:
	   if this is the first port to be initialized,
	   the common blocks must be initialized as well; otherwise not.
	 */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP  load counts before us %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP  new load counts       %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!!  mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queues should only be re-enabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	/* TBD we really need to reset the chip
	   if we want to recover from this */
	return rc;
}

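/* Tear down one non-default connection: a HALT ramrod followed by a CFC
 * delete, each completion collected synchronously by bnx2x_wait_ramrod()
 * in polling mode, since the IRQs may already have been released by the
 * unload path.
 */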
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	u16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for the completion to arrive on the default status block;
	   we are going to reset the chip anyway, so there is not much
	   to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#else
			rc = -EBUSY;
#endif
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

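/* NIC unload sequence, roughly the mirror image of bnx2x_nic_load():
 * quiesce RX filtering and the TX fastpath, release IRQs, invalidate the
 * CAM/multicast configuration, choose a WOL-dependent reset_code to
 * report to the MCP, close all connections, reset the chip at the
 * negotiated scope (common/port/function) and free driver resources.
 */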
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		smp_rmb();
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp, 1000);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
			smp_rmb();
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = BP_CL_ID(bp);
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0, which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections.
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP  load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP  new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

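/* PXP2 "pretend" registers: writing a function number into a per-function
 * pretend register makes subsequent GRC accesses appear to come from that
 * function (this is how bnx2x_undi_int_disable_e1h() below temporarily
 * acts as function 0).  The helper here only maps a function index to its
 * pretend register address.
 */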
static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now on we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

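/* Heuristic UNDI detection, per the checks below: a pre-boot UNDI driver
 * leaves MISC_REG_UNPREPARED set and programs the normal doorbell CID
 * offset to 0x7.  When both are observed, the stale driver is unloaded
 * through the MCP and the device is reset before this driver takes over.
 */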
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

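/* The chip ID assembled below follows the layout given in the comment
 * inside the function:
 *	chip_id = (chip_num << 16) | (rev << 12) | (metal << 4) | bond_id
 * e.g. (hypothetical register values) chip_num 0x164e, rev 0x1,
 * metal 0x00 and bond_id 0x0 yield chip_id 0x164e1000.
 */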
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn;
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

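/* Build the ethtool SUPPORTED_* mask for the port from the NVRAM switch
 * configuration and external PHY type, then trim it by speed_cap_mask so
 * that only the speeds the board is actually wired for remain advertised.
 */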
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

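/* Translate the NVRAM link_config into the requested link parameters:
 * either autoneg with the full supported mask advertised, or one forced
 * speed/duplex.  Any combination not backed by bp->port.supported is
 * reported as an NVRAM config error and the function returns early.
 */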
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

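/* The port MAC address is kept in shmem as two words, mac_upper (top two
 * bytes) and mac_lower (bottom four).  Worked example (hypothetical):
 * mac_upper 0x0011 with mac_lower 0x22334455 unpacks below to
 * dev_addr 00:11:22:33:44:55.
 */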
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("Single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

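/* One-time per-device init: read HW/port info, undo any pre-boot (UNDI)
 * driver, sanity-check the module parameters against the requested
 * interrupt mode and set the default ring sizes, coalescing values and
 * the periodic timer.
 */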
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

c18487ee
YR
8417#define PHY_FW_VER_LEN 10
8418
a2fbb9ea
ET
8419static void bnx2x_get_drvinfo(struct net_device *dev,
8420 struct ethtool_drvinfo *info)
8421{
8422 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8423 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8424
8425 strcpy(info->driver, DRV_MODULE_NAME);
8426 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8427
8428 phy_fw_ver[0] = '\0';
34f80b04 8429 if (bp->port.pmf) {
4a37fb66 8430 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8431 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8432 (bp->state != BNX2X_STATE_CLOSED),
8433 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8434 bnx2x_release_phy_lock(bp);
34f80b04 8435 }
c18487ee 8436
f0e53a84
EG
8437 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8438 (bp->common.bc_ver & 0xff0000) >> 16,
8439 (bp->common.bc_ver & 0xff00) >> 8,
8440 (bp->common.bc_ver & 0xff),
8441 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8442 strcpy(info->bus_info, pci_name(bp->pdev));
8443 info->n_stats = BNX2X_NUM_STATS;
8444 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8445 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8446 info->regdump_len = 0;
8447}
8448
8449static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8450{
8451 struct bnx2x *bp = netdev_priv(dev);
8452
8453 if (bp->flags & NO_WOL_FLAG) {
8454 wol->supported = 0;
8455 wol->wolopts = 0;
8456 } else {
8457 wol->supported = WAKE_MAGIC;
8458 if (bp->wol)
8459 wol->wolopts = WAKE_MAGIC;
8460 else
8461 wol->wolopts = 0;
8462 }
8463 memset(&wol->sopass, 0, sizeof(wol->sopass));
8464}
8465
8466static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8467{
8468 struct bnx2x *bp = netdev_priv(dev);
8469
8470 if (wol->wolopts & ~WAKE_MAGIC)
8471 return -EINVAL;
8472
8473 if (wol->wolopts & WAKE_MAGIC) {
8474 if (bp->flags & NO_WOL_FLAG)
8475 return -EINVAL;
8476
8477 bp->wol = 1;
34f80b04 8478 } else
a2fbb9ea 8479 bp->wol = 0;
34f80b04 8480
a2fbb9ea
ET
8481 return 0;
8482}
8483
8484static u32 bnx2x_get_msglevel(struct net_device *dev)
8485{
8486 struct bnx2x *bp = netdev_priv(dev);
8487
8488 return bp->msglevel;
8489}
8490
8491static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8492{
8493 struct bnx2x *bp = netdev_priv(dev);
8494
8495 if (capable(CAP_NET_ADMIN))
8496 bp->msglevel = level;
8497}
8498
8499static int bnx2x_nway_reset(struct net_device *dev)
8500{
8501 struct bnx2x *bp = netdev_priv(dev);
8502
34f80b04
EG
8503 if (!bp->port.pmf)
8504 return 0;
a2fbb9ea 8505
34f80b04 8506 if (netif_running(dev)) {
bb2a0f7a 8507 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8508 bnx2x_link_set(bp);
8509 }
a2fbb9ea
ET
8510
8511 return 0;
8512}
8513
8514static int bnx2x_get_eeprom_len(struct net_device *dev)
8515{
8516 struct bnx2x *bp = netdev_priv(dev);
8517
34f80b04 8518 return bp->common.flash_size;
a2fbb9ea
ET
8519}
8520
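/* NVRAM access is arbitrated between the driver and the MCP firmware:
 * a client sets its per-port REQ_SET bit in MCP_REG_MCPR_NVM_SW_ARB and
 * then polls its ARB status bit until the arbiter grants ownership (or
 * the loop below times out); REQ_CLR hands the interface back.
 */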
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			val = cpu_to_be32(val);
			*ret_val = val;
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

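/* BYTE_OFFSET() maps a byte offset to the bit position of that byte
 * within its containing dword, e.g. offset 0x102 -> 8 * 2 = 16, so the
 * single-byte write below can mask out bits 23:16 and splice the new
 * byte in with a dword read-modify-write.
 */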
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	u32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

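/* Multi-byte writes go dword-at-a-time.  The NVM controller expects
 * MCPR_NVM_COMMAND_FIRST on the first dword of each flash page and
 * MCPR_NVM_COMMAND_LAST on the last dword of a page (or of the buffer),
 * which is what the cmd_flags juggling at the top of the loop below
 * implements.
 */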
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

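/* Note: the magic number 0x00504859 below is simply "PHY" in ASCII
 * (0x50 'P', 0x48 'H', 0x59 'Y') - ethtool -E with this magic is used
 * as the entry point for an external PHY firmware upgrade rather than
 * a plain NVRAM write.
 */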
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars, 1);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

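/* The register test walks a table of { offset, per-port stride, RW mask }
 * entries, writing first all-zeros and then all-ones through each mask
 * and reading the value back; any writable bit that does not latch fails
 * the test.
 */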
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);

	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
		u16 cnt = 1000;
		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		/* wait until link state is restored */
		if (link_up)
			while (cnt-- && bnx2x_test_link(&bp->link_params,
							&bp->link_vars))
				msleep(10);
	} else
		return -EINVAL;

	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

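/* Each NVRAM region carries its CRC32 in the trailing dword, so running
 * the little-endian CRC over a region including its stored checksum
 * should always yield the standard CRC-32 residue 0xdebb20e3; any other
 * value means the region is corrupt.
 */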
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	u32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
					8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
					8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
					8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
					8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
					8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

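/* All counters are exported to ethtool as u64: 4-byte entries are simply
 * widened, while 8-byte entries are kept by the firmware as adjacent
 * {hi, lo} u32 pairs and reassembled with HILO_U64 below.
 */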
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

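/* Bits 1:0 of the PCI_PM_CTRL word select the device power state
 * (0 = D0, 3 = D3hot, hence the "pmcsr |= 3" below), and
 * PCI_PM_CTRL_PME_ENABLE arms PME# so a magic packet can wake the
 * device when WoL is enabled.
 */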
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

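/* Note: the last entry of each RCQ page is presumably a "next page" link
 * rather than a real completion, so when the status block index lands on
 * it the index is bumped by one below to keep it comparable with our
 * local rx_comp_cons.
 */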
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp, budget);

	if (bnx2x_has_rx_work(fp))
		work_done = bnx2x_rx_int(fp, budget);
	rmb(); /* BNX2X_HAS_WORK() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}

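/* Roughly, for a TSO skb whose first buffer holds headers plus payload:
 *
 *	before:  [BD0: headers+data, nbytes = old_len]
 *	after:   [BD0: headers, nbytes = hlen]
 *	         [BD1: data, nbytes = old_len - hlen, addr = BD0.addr + hlen]
 *
 * Both BDs point into the same DMA mapping, so only the first is
 * unmapped on completion.
 */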
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}

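/* Adjust a partial checksum when the value the stack computed starts
 * `fix` bytes away from where the chip expects it to start: the checksum
 * of the skipped span is folded out (fix > 0) or in (fix < 0), and the
 * result is byte-swapped for the BD.
 */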
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == ntohs(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}

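/* The firmware can fetch at most MAX_FETCH_BD descriptors per packet, so
 * for LSO every window of (MAX_FETCH_BD - 3) consecutive fragments must
 * still contain at least one full MSS of payload.  The check below slides
 * that window across the fragment list; if any window comes up short, the
 * skb is linearized before transmission.
 */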
632da4d6 10130#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
755735eb
EG
10131/* check if packet requires linearization (packet is too fragmented) */
10132static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10133 u32 xmit_type)
10134{
10135 int to_copy = 0;
10136 int hlen = 0;
10137 int first_bd_sz = 0;
10138
10139 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10140 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10141
10142 if (xmit_type & XMIT_GSO) {
10143 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10144 /* Check if LSO packet needs to be copied:
10145 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10146 int wnd_size = MAX_FETCH_BD - 3;
33471629 10147 /* Number of windows to check */
755735eb
EG
10148 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10149 int wnd_idx = 0;
10150 int frag_idx = 0;
10151 u32 wnd_sum = 0;
10152
10153 /* Headers length */
10154 hlen = (int)(skb_transport_header(skb) - skb->data) +
10155 tcp_hdrlen(skb);
10156
10157 /* Amount of data (w/o headers) on linear part of SKB*/
10158 first_bd_sz = skb_headlen(skb) - hlen;
10159
10160 wnd_sum = first_bd_sz;
10161
10162 /* Calculate the first sum - it's special */
10163 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10164 wnd_sum +=
10165 skb_shinfo(skb)->frags[frag_idx].size;
10166
10167 /* If there was data on linear skb data - check it */
10168 if (first_bd_sz > 0) {
10169 if (unlikely(wnd_sum < lso_mss)) {
10170 to_copy = 1;
10171 goto exit_lbl;
10172 }
10173
10174 wnd_sum -= first_bd_sz;
10175 }
10176
10177 /* Others are easier: run through the frag list and
10178 check all windows */
10179 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10180 wnd_sum +=
10181 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10182
10183 if (unlikely(wnd_sum < lso_mss)) {
10184 to_copy = 1;
10185 break;
10186 }
10187 wnd_sum -=
10188 skb_shinfo(skb)->frags[wnd_idx].size;
10189 }
10190
10191 } else {
10192 /* in non-LSO too fragmented packet should always
10193 be linearized */
10194 to_copy = 1;
10195 }
10196 }
10197
10198exit_lbl:
10199 if (unlikely(to_copy))
10200 DP(NETIF_MSG_TX_QUEUED,
10201 "Linearization IS REQUIRED for %s packet. "
10202 "num_frags %d hlen %d first_bd_sz %d\n",
10203 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10204 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10205
10206 return to_copy;
10207}
632da4d6 10208#endif

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb
	   (due to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all PBD sizes are in words - NOT DWORDS!
	*/
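
	/*
	 * Illustrative BD chain (not from the original sources) for a TSO
	 * packet with two page fragments:
	 *
	 *   BD0: start BD - headers (ETH_TX_BD_FLAGS_START_BD)
	 *   BD1: parsing BD (pbd) - offsets, pseudo csum, MSS
	 *   BD2: remainder of the linear data (split off by bnx2x_tx_split)
	 *   BD3: frag 0
	 *   BD4: frag 1 (ETH_TX_BD_FLAGS_END_BD)
	 *
	 * nbd starts at nr_frags + 2 when a parse BD is used (+1 more if
	 * the linear part is split), which matches the accounting below.
	 */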

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
#endif
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data = (hlen |
				    ((skb->protocol == htons(ETH_P_8021Q)) <<
				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d offset %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes all packets have BDs.
	 */
	wmb();

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();
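
	/*
	 * Note (illustrative, not from the original sources): the FW tracks
	 * two producers. The wmb() above orders the BD writes before the
	 * bds_prod update, and the mb() keeps bds_prod visible before
	 * packets_prod, so the FW never sees a packet count that is not yet
	 * backed by its BDs.
	 */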

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
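
/*
 * Note (illustrative, not from the original sources): on E1H the multicast
 * filter is a 256-bit hash table spread across MC_HASH_SIZE 32-bit
 * registers. The top byte of the CRC32c of each MAC picks one of the 256
 * bits: bit >> 5 selects the register, bit & 0x1f the position inside it.
 * Any frame whose hash bit is set is accepted, so colliding addresses pass
 * the HW filter and are sorted out further up the stack.
 */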

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}
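
/*
 * Note (illustrative, not from the original sources): the MII ioctls carry
 * clause-22 style 5-bit register numbers, hence the reg_num & 0x1f mask,
 * while the PHY itself is reached through the clause-45 accessors
 * bnx2x_cl45_read()/bnx2x_cl45_write() at the fixed device address
 * DEFAULT_PHY_DEV_ADDR.
 */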

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
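
/*
 * Note (illustrative, not from the original sources): both helpers decode
 * the PCIe Link Control/Status word exposed at PCICFG_OFFSET +
 * PCICFG_LINK_CONTROL. An x8 Gen2 link, for example, would report width 8
 * and speed 2, which bnx2x_init_one() prints as "PCI-E x8 5GHz (Gen2)".
 */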

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
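
/*
 * Note (illustrative, not from the original sources): after a bus error
 * the bootcode shared memory may be the only surviving state, so
 * bnx2x_eeh_recover() re-reads the shmem base, checks that it lies in the
 * expected 0xA0000-0xC0000 window and that the validity signature is
 * intact, and re-syncs the driver/MCP mailbox sequence number before the
 * NIC is reloaded in bnx2x_io_resume().
 */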

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};
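
/*
 * Note (illustrative, not from the original sources): the PCI error
 * recovery core invokes these callbacks in order - error_detected()
 * quiesces the device and requests a slot reset, slot_reset() re-enables
 * the device and restores config space after the bus reset, and resume()
 * reloads the NIC once recovery is declared complete.
 */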

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);