/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.114-1"
#define DRV_MODULE_RELDATE	"2009/07/29"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

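/*
 * Note on the two accessors below: they reach device registers through
 * the GRC address window in PCI config space rather than a BAR mapping.
 * PCICFG_GRC_ADDRESS selects the target register, PCICFG_GRC_DATA moves
 * the data, and the window is parked back at PCICFG_VENDOR_ID_OFFSET so
 * a stray config cycle cannot hit a live register.
 */
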
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

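/*
 * DMAE overview: the on-chip DMA engine copies between host memory and
 * device (GRC) space without CPU programmed I/O.  A command is staged
 * word by word into one of the 16 slots at DMAE_REG_CMD_MEM and kicked
 * via the matching "go" register above; the engine signals completion
 * by writing comp_val to comp_addr, which the callers below spin on.
 */
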
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

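/*
 * Dump the assert lists of the four storm processors (X/T/C/U).  Each
 * list entry is four consecutive 32-bit words in the storm's internal
 * memory; an entry whose first word still holds
 * COMMON_ASM_INVALID_ASSERT_OPCODE marks the end of the recorded
 * asserts.  Returns the number of asserts found.
 */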
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

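/*
 * The MCP firmware keeps a circular text log in its scratchpad RAM; the
 * word at offset 0xf104 holds the current write mark (as an MCP-space
 * address, hence the 0x08000000 adjustment below).  Dumping the newer
 * half first and the older half second restores chronological order.
 */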
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("     fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

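/*
 * Host coalescing (HC) configuration: exactly one interrupt delivery
 * mode is enabled at a time - MSI-X (one vector per status block),
 * single-vector MSI, or legacy INTx - with the attention bit enabled in
 * all of them.  On E1H the leading/trailing edge registers are also
 * programmed (restricted per-VN in multi-function mode, with NIG/GPIO3
 * attention added for the PMF).
 */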
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

809
a2fbb9ea
ET
810/* free skb in the packet ring at pos idx
811 * return idx of last bd freed
812 */
813static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
814 u16 idx)
815{
816 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
ca00392c
EG
817 struct eth_tx_start_bd *tx_start_bd;
818 struct eth_tx_bd *tx_data_bd;
a2fbb9ea 819 struct sk_buff *skb = tx_buf->skb;
34f80b04 820 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
821 int nbd;
822
823 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
824 idx, tx_buf, skb);
825
826 /* unmap first bd */
827 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
ca00392c
EG
828 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
829 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
830 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
a2fbb9ea 831
ca00392c 832 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
a2fbb9ea 833#ifdef BNX2X_STOP_ON_ERROR
ca00392c 834 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
34f80b04 835 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
836 bnx2x_panic();
837 }
838#endif
ca00392c 839 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea 840
ca00392c
EG
841 /* Get the next bd */
842 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
a2fbb9ea 843
ca00392c
EG
844 /* Skip a parse bd... */
845 --nbd;
846 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
847
848 /* ...and the TSO split header bd since they have no mapping */
849 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
850 --nbd;
851 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
a2fbb9ea
ET
852 }
853
854 /* now free frags */
855 while (nbd > 0) {
856
857 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
ca00392c
EG
858 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
859 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
860 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
a2fbb9ea
ET
861 if (--nbd)
862 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
863 }
864
865 /* release skb */
53e5e96e 866 WARN_ON(!skb);
ca00392c 867 dev_kfree_skb_any(skb);
a2fbb9ea
ET
868 tx_buf->first_bd = 0;
869 tx_buf->skb = NULL;
870
34f80b04 871 return new_cons;
a2fbb9ea
ET
872}
873
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

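/*
 * Example of the arithmetic above: prod = 5 and cons = 65530 after a
 * u16 wrap gives SUB_S16(5, 65530) = 11 BDs in flight; adding
 * NUM_TX_RINGS charges the "next page" BDs (one per ring page) that can
 * never carry data, so the value returned is a safe count of free BDs.
 */
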
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

VZ
1030static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1031 struct bnx2x_fastpath *fp, u16 index)
1032{
1033 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1034 struct page *page = sw_buf->page;
1035 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1036
1037 /* Skip "next page" elements */
1038 if (!page)
1039 return;
1040
1041 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
4f40f2cb 1042 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1043 __free_pages(page, PAGES_PER_SGE_SHIFT);
1044
1045 sw_buf->page = NULL;
1046 sge->addr_hi = 0;
1047 sge->addr_lo = 0;
1048}
1049
1050static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1051 struct bnx2x_fastpath *fp, int last)
1052{
1053 int i;
1054
1055 for (i = 0; i < last; i++)
1056 bnx2x_free_rx_sge(bp, fp, i);
1057}
1058
1059static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1060 struct bnx2x_fastpath *fp, u16 index)
1061{
1062 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1063 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1064 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1065 dma_addr_t mapping;
1066
1067 if (unlikely(page == NULL))
1068 return -ENOMEM;
1069
4f40f2cb 1070 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
7a9b2557 1071 PCI_DMA_FROMDEVICE);
8d8bb39b 1072 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
7a9b2557
VZ
1073 __free_pages(page, PAGES_PER_SGE_SHIFT);
1074 return -ENOMEM;
1075 }
1076
1077 sw_buf->page = page;
1078 pci_unmap_addr_set(sw_buf, mapping, mapping);
1079
1080 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1081 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1082
1083 return 0;
1084}
1085
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

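/*
 * SGE mask bookkeeping, in short: sge_mask carries one bit per SGE
 * entry, grouped into 64-bit elements and initialized to all 1-s.  As
 * the FW reports consumed pages their bits are cleared; the producer
 * then only advances over elements that have gone fully to zero (all
 * entries consumed and replenished), re-arming each element in passing.
 */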
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

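/*
 * TPA (HW LRO) flow: on TPA_START the FW adopts the current Rx buffer
 * as the head of an aggregation "bin"; the driver parks that skb in
 * tpa_pool[queue] (bnx2x_tpa_start above) and feeds a fresh one to the
 * ring in its place.  On TPA_END (bnx2x_tpa_stop) the parked skb gets
 * the aggregated SGE pages attached by bnx2x_fill_frag_skb() below and
 * is handed to the stack.
 */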
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

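/*
 * The Rx path maintains three producers, all published to USTORM in one
 * go above: bd_prod (buffer descriptors), cqe_prod (completion queue
 * entries) and sge_prod (scatter-gather pages used by TPA).
 */
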
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

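/*
 * Interrupt scheme: with MSI-X each fastpath owns a vector and the
 * handler below services it directly - Rx queues defer to NAPI, Tx
 * queues are drained in place and re-armed via bnx2x_ack_sb().  With
 * INTx/MSI, bnx2x_interrupt() demultiplexes the same work from the
 * status word returned by bnx2x_ack_int().
 */
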
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif
	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	} else {
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);
		rmb();
		bnx2x_tx_int(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return IRQ_HANDLED;
}

1724static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1725{
555f6c78 1726 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1727 u16 status = bnx2x_ack_int(bp);
34f80b04 1728 u16 mask;
ca00392c 1729 int i;
a2fbb9ea 1730
34f80b04 1731 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1732 if (unlikely(status == 0)) {
1733 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1734 return IRQ_NONE;
1735 }
f5372251 1736 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1737
34f80b04 1738 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1739 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1740 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1741 return IRQ_HANDLED;
1742 }
1743
3196a88a
EG
1744#ifdef BNX2X_STOP_ON_ERROR
1745 if (unlikely(bp->panic))
1746 return IRQ_HANDLED;
1747#endif
1748
ca00392c
EG
1749 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1750 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1751
ca00392c
EG
1752 mask = 0x2 << fp->sb_id;
1753 if (status & mask) {
1754 /* Handle Rx or Tx according to SB id */
1755 if (fp->is_rx_queue) {
1756 prefetch(fp->rx_cons_sb);
1757 prefetch(&fp->status_blk->u_status_block.
1758 status_block_index);
a2fbb9ea 1759
ca00392c 1760 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1761
ca00392c
EG
1762 } else {
1763 prefetch(fp->tx_cons_sb);
1764 prefetch(&fp->status_blk->c_status_block.
1765 status_block_index);
1766
1767 bnx2x_update_fpsb_idx(fp);
1768 rmb();
1769 bnx2x_tx_int(fp);
1770
1771 /* Re-enable interrupts */
1772 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1773 le16_to_cpu(fp->fp_u_idx),
1774 IGU_INT_NOP, 1);
1775 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1776 le16_to_cpu(fp->fp_c_idx),
1777 IGU_INT_ENABLE, 1);
1778 }
1779 status &= ~mask;
1780 }
a2fbb9ea
ET
1781 }
1782
a2fbb9ea 1783
34f80b04 1784 if (unlikely(status & 0x1)) {
1cf167f2 1785 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1786
1787 status &= ~0x1;
1788 if (!status)
1789 return IRQ_HANDLED;
1790 }
1791
34f80b04
EG
1792 if (status)
1793 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1794 status);
a2fbb9ea 1795
c18487ee 1796 return IRQ_HANDLED;
a2fbb9ea
ET
1797}
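
/*
 * Worked example (illustrative, not part of the driver): the status word
 * read by bnx2x_ack_int() carries one bit per status block.  A fastpath
 * with sb_id = 3 owns mask = 0x2 << 3 = 0x10 in the loop above, while
 * status & 0x1 alone is assumed here to belong to the default/slowpath
 * status block and is deferred to the slowpath task.
 */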
1798
c18487ee 1799/* end of fast path */
a2fbb9ea 1800
bb2a0f7a 1801static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1802
c18487ee
YR
1803/* Link */
1804
1805/*
1806 * General service functions
1807 */
a2fbb9ea 1808
4a37fb66 1809static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1810{
1811 u32 lock_status;
1812 u32 resource_bit = (1 << resource);
4a37fb66
YG
1813 int func = BP_FUNC(bp);
1814 u32 hw_lock_control_reg;
c18487ee 1815 int cnt;
a2fbb9ea 1816
c18487ee
YR
1817 /* Validating that the resource is within range */
1818 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1819 DP(NETIF_MSG_HW,
1820 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1821 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1822 return -EINVAL;
1823 }
a2fbb9ea 1824
4a37fb66
YG
1825 if (func <= 5) {
1826 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1827 } else {
1828 hw_lock_control_reg =
1829 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1830 }
1831
c18487ee 1832 /* Validating that the resource is not already taken */
4a37fb66 1833 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1834 if (lock_status & resource_bit) {
1835 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1836 lock_status, resource_bit);
1837 return -EEXIST;
1838 }
a2fbb9ea 1839
46230476
EG
 1840	/* Try for 5 seconds, polling every 5ms */
1841 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1842 /* Try to acquire the lock */
4a37fb66
YG
1843 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1844 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1845 if (lock_status & resource_bit)
1846 return 0;
a2fbb9ea 1847
c18487ee 1848 msleep(5);
a2fbb9ea 1849 }
c18487ee
YR
1850 DP(NETIF_MSG_HW, "Timeout\n");
1851 return -EAGAIN;
1852}
a2fbb9ea 1853
4a37fb66 1854static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1855{
1856 u32 lock_status;
1857 u32 resource_bit = (1 << resource);
4a37fb66
YG
1858 int func = BP_FUNC(bp);
1859 u32 hw_lock_control_reg;
a2fbb9ea 1860
c18487ee
YR
1861 /* Validating that the resource is within range */
1862 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1863 DP(NETIF_MSG_HW,
1864 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1865 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1866 return -EINVAL;
1867 }
1868
4a37fb66
YG
1869 if (func <= 5) {
1870 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1871 } else {
1872 hw_lock_control_reg =
1873 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1874 }
1875
c18487ee 1876 /* Validating that the resource is currently taken */
4a37fb66 1877 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1878 if (!(lock_status & resource_bit)) {
1879 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1880 lock_status, resource_bit);
1881 return -EFAULT;
a2fbb9ea
ET
1882 }
1883
4a37fb66 1884 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1885 return 0;
1886}
1887
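
/*
 * Usage sketch (illustrative, not part of the driver): callers bracket
 * access to a shared HW resource with the two helpers above and must be
 * prepared for the 5 second acquisition timeout:
 *
 *	int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 *	if (rc)			(-EINVAL, -EEXIST or -EAGAIN)
 *		return rc;
 *	... touch the shared resource ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 */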
1888/* HW Lock for shared dual port PHYs */
4a37fb66 1889static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1890{
34f80b04 1891 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1892
46c6a674
EG
1893 if (bp->port.need_hw_lock)
1894 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1895}
a2fbb9ea 1896
4a37fb66 1897static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1898{
46c6a674
EG
1899 if (bp->port.need_hw_lock)
1900 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1901
34f80b04 1902 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1903}
a2fbb9ea 1904
4acac6a5
EG
1905int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1906{
1907 /* The GPIO should be swapped if swap register is set and active */
1908 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1909 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1910 int gpio_shift = gpio_num +
1911 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1912 u32 gpio_mask = (1 << gpio_shift);
1913 u32 gpio_reg;
1914 int value;
1915
1916 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1917 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1918 return -EINVAL;
1919 }
1920
1921 /* read GPIO value */
1922 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1923
1924 /* get the requested pin value */
1925 if ((gpio_reg & gpio_mask) == gpio_mask)
1926 value = 1;
1927 else
1928 value = 0;
1929
1930 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1931
1932 return value;
1933}
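
/*
 * Worked example (illustrative): with both NIG_REG_PORT_SWAP and
 * NIG_REG_STRAP_OVERRIDE set, a read of GPIO 2 on port 0 is served by
 * the other port's pins.  Assuming MISC_REGISTERS_GPIO_PORT_SHIFT is 4
 * (one nibble of pins per port):
 *
 *	gpio_port  = (1 && 1) ^ 0 = 1
 *	gpio_shift = 2 + 4       = 6
 *	gpio_mask  = 1 << 6      = 0x40
 *
 * so bit 6 of MISC_REG_GPIO, not bit 2, reports the requested pin.
 */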
1934
17de50b7 1935int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1936{
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1943 u32 gpio_reg;
a2fbb9ea 1944
c18487ee
YR
1945 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1946 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1947 return -EINVAL;
1948 }
a2fbb9ea 1949
4a37fb66 1950 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1951 /* read GPIO and mask except the float bits */
1952 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1953
c18487ee
YR
1954 switch (mode) {
1955 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1956 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1957 gpio_num, gpio_shift);
1958 /* clear FLOAT and set CLR */
1959 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1960 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1961 break;
a2fbb9ea 1962
c18487ee
YR
1963 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1964 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1965 gpio_num, gpio_shift);
1966 /* clear FLOAT and set SET */
1967 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1968 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1969 break;
a2fbb9ea 1970
17de50b7 1971 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1972 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1973 gpio_num, gpio_shift);
1974 /* set FLOAT */
1975 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1976 break;
a2fbb9ea 1977
c18487ee
YR
1978 default:
1979 break;
a2fbb9ea
ET
1980 }
1981
c18487ee 1982 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1983 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1984
c18487ee 1985 return 0;
a2fbb9ea
ET
1986}
1987
4acac6a5
EG
1988int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1989{
1990 /* The GPIO should be swapped if swap register is set and active */
1991 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993 int gpio_shift = gpio_num +
1994 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995 u32 gpio_mask = (1 << gpio_shift);
1996 u32 gpio_reg;
1997
1998 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2000 return -EINVAL;
2001 }
2002
2003 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2004 /* read GPIO int */
2005 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2006
2007 switch (mode) {
2008 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2009 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2010 "output low\n", gpio_num, gpio_shift);
2011 /* clear SET and set CLR */
2012 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2013 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2014 break;
2015
2016 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2017 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2018 "output high\n", gpio_num, gpio_shift);
2019 /* clear CLR and set SET */
2020 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2021 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2022 break;
2023
2024 default:
2025 break;
2026 }
2027
2028 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2029 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2030
2031 return 0;
2032}
2033
c18487ee 2034static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2035{
c18487ee
YR
2036 u32 spio_mask = (1 << spio_num);
2037 u32 spio_reg;
a2fbb9ea 2038
c18487ee
YR
2039 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2040 (spio_num > MISC_REGISTERS_SPIO_7)) {
2041 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2042 return -EINVAL;
a2fbb9ea
ET
2043 }
2044
4a37fb66 2045 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2046 /* read SPIO and mask except the float bits */
2047 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2048
c18487ee 2049 switch (mode) {
6378c025 2050 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2051 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2052 /* clear FLOAT and set CLR */
2053 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2054 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2055 break;
a2fbb9ea 2056
6378c025 2057 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2058 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2059 /* clear FLOAT and set SET */
2060 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2061 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2062 break;
a2fbb9ea 2063
c18487ee
YR
2064 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2065 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2066 /* set FLOAT */
2067 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2068 break;
a2fbb9ea 2069
c18487ee
YR
2070 default:
2071 break;
a2fbb9ea
ET
2072 }
2073
c18487ee 2074 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2075 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2076
a2fbb9ea
ET
2077 return 0;
2078}
2079
c18487ee 2080static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2081{
ad33ea3a
EG
2082 switch (bp->link_vars.ieee_fc &
2083 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2084 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2085 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2086 ADVERTISED_Pause);
2087 break;
356e2385 2088
c18487ee 2089 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2090 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2091 ADVERTISED_Pause);
2092 break;
356e2385 2093
c18487ee 2094 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2095 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2096 break;
356e2385 2097
c18487ee 2098 default:
34f80b04 2099 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2100 ADVERTISED_Pause);
2101 break;
2102 }
2103}
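
/*
 * Summary of the mapping above (illustrative):
 *
 *	ADV_PAUSE_NONE        ->  clear Pause and Asym_Pause
 *	ADV_PAUSE_BOTH        ->  set Pause | Asym_Pause
 *	ADV_PAUSE_ASYMMETRIC  ->  set Asym_Pause only
 *	anything else         ->  clear both (same as NONE)
 */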
f1410647 2104
c18487ee
YR
2105static void bnx2x_link_report(struct bnx2x *bp)
2106{
2691d51d
EG
2107 if (bp->state == BNX2X_STATE_DISABLED) {
2108 netif_carrier_off(bp->dev);
2109 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2110 return;
2111 }
2112
c18487ee
YR
2113 if (bp->link_vars.link_up) {
2114 if (bp->state == BNX2X_STATE_OPEN)
2115 netif_carrier_on(bp->dev);
2116 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2117
c18487ee 2118 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2119
c18487ee
YR
2120 if (bp->link_vars.duplex == DUPLEX_FULL)
2121 printk("full duplex");
2122 else
2123 printk("half duplex");
f1410647 2124
c0700f90
DM
2125 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2126 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2127 printk(", receive ");
356e2385
EG
2128 if (bp->link_vars.flow_ctrl &
2129 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2130 printk("& transmit ");
2131 } else {
2132 printk(", transmit ");
2133 }
2134 printk("flow control ON");
2135 }
2136 printk("\n");
f1410647 2137
c18487ee
YR
2138 } else { /* link_down */
2139 netif_carrier_off(bp->dev);
2140 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2141 }
c18487ee
YR
2142}
2143
b5bf9068 2144static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2145{
19680c48
EG
2146 if (!BP_NOMCP(bp)) {
2147 u8 rc;
a2fbb9ea 2148
19680c48 2149 /* Initialize link parameters structure variables */
8c99e7b0
YR
2150 /* It is recommended to turn off RX FC for jumbo frames
2151 for better performance */
0c593270 2152 if (bp->dev->mtu > 5000)
c0700f90 2153 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2154 else
c0700f90 2155 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2156
4a37fb66 2157 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2158
2159 if (load_mode == LOAD_DIAG)
2160 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2161
19680c48 2162 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2163
4a37fb66 2164 bnx2x_release_phy_lock(bp);
a2fbb9ea 2165
3c96c68b
EG
2166 bnx2x_calc_fc_adv(bp);
2167
b5bf9068
EG
2168 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2169 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2170 bnx2x_link_report(bp);
b5bf9068 2171 }
34f80b04 2172
19680c48
EG
2173 return rc;
2174 }
f5372251 2175 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2176 return -EINVAL;
a2fbb9ea
ET
2177}
2178
c18487ee 2179static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2180{
19680c48 2181 if (!BP_NOMCP(bp)) {
4a37fb66 2182 bnx2x_acquire_phy_lock(bp);
19680c48 2183 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2184 bnx2x_release_phy_lock(bp);
a2fbb9ea 2185
19680c48
EG
2186 bnx2x_calc_fc_adv(bp);
2187 } else
f5372251 2188 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2189}
a2fbb9ea 2190
c18487ee
YR
2191static void bnx2x__link_reset(struct bnx2x *bp)
2192{
19680c48 2193 if (!BP_NOMCP(bp)) {
4a37fb66 2194 bnx2x_acquire_phy_lock(bp);
589abe3a 2195 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2196 bnx2x_release_phy_lock(bp);
19680c48 2197 } else
f5372251 2198 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2199}
a2fbb9ea 2200
c18487ee
YR
2201static u8 bnx2x_link_test(struct bnx2x *bp)
2202{
2203 u8 rc;
a2fbb9ea 2204
4a37fb66 2205 bnx2x_acquire_phy_lock(bp);
c18487ee 2206 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2207 bnx2x_release_phy_lock(bp);
a2fbb9ea 2208
c18487ee
YR
2209 return rc;
2210}
a2fbb9ea 2211
8a1c38d1 2212static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2213{
8a1c38d1
EG
2214 u32 r_param = bp->link_vars.line_speed / 8;
2215 u32 fair_periodic_timeout_usec;
2216 u32 t_fair;
34f80b04 2217
8a1c38d1
EG
2218 memset(&(bp->cmng.rs_vars), 0,
2219 sizeof(struct rate_shaping_vars_per_port));
2220 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2221
8a1c38d1
EG
2222 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2223 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2224
8a1c38d1
EG
2225 /* this is the threshold below which no timer arming will occur
 2226	   the 1.25 coefficient makes the threshold a little bigger
 2227	   than the real time, to compensate for timer inaccuracy */
2228 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2229 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2230
8a1c38d1
EG
2231 /* resolution of fairness timer */
2232 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2233 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2234 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2235
8a1c38d1
EG
2236 /* this is the threshold below which we won't arm the timer anymore */
2237 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2238
8a1c38d1
EG
2239 /* we multiply by 1e3/8 to get bytes/msec.
2240 We don't want the credits to pass a credit
2241 of the t_fair*FAIR_MEM (algorithm resolution) */
2242 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2243 /* since each tick is 4 usec */
2244 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2245}
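
/*
 * Worked example (illustrative): at 10G, line_speed = 10000 Mbps, so
 * r_param = 10000 / 8 = 1250 bytes/usec.  Taking the 100 usec
 * RS_PERIODIC_TIMEOUT_USEC implied by the comment above:
 *
 *	rs_periodic_timeout = 100 / 4            = 25 SDM ticks
 *	rs_threshold        = 100 * 1250 * 5 / 4 = 156250 bytes
 *
 * and with t_fair = 1000 usec at 10G (per the comment), upper_bound
 * works out to 1250 * 1000 * FAIR_MEM bytes.
 */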
2246
2691d51d
EG
2247/* Calculates the sum of vn_min_rates.
2248 It's needed for further normalizing of the min_rates.
2249 Returns:
2250 sum of vn_min_rates.
2251 or
2252 0 - if all the min_rates are 0.
 2253	In the latter case the fairness algorithm should be deactivated.
 2254	If not all min_rates are zero then those that are zero will be set to 1.
2255 */
2256static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2257{
2258 int all_zero = 1;
2259 int port = BP_PORT(bp);
2260 int vn;
2261
2262 bp->vn_weight_sum = 0;
2263 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2264 int func = 2*vn + port;
2265 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2266 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2267 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2268
2269 /* Skip hidden vns */
2270 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2271 continue;
2272
2273 /* If min rate is zero - set it to 1 */
2274 if (!vn_min_rate)
2275 vn_min_rate = DEF_MIN_RATE;
2276 else
2277 all_zero = 0;
2278
2279 bp->vn_weight_sum += vn_min_rate;
2280 }
2281
2282 /* ... only if all min rates are zeros - disable fairness */
2283 if (all_zero)
2284 bp->vn_weight_sum = 0;
2285}
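
/*
 * Example (illustrative): per-vn min rates of {0, 2500, 0, 4000}
 * (mf_cfg values scaled by 100) are not all zero, so the two zero
 * entries are bumped to DEF_MIN_RATE and vn_weight_sum becomes
 * 2500 + 4000 + 2 * DEF_MIN_RATE.  Only if all four were zero would
 * vn_weight_sum stay 0 and fairness be disabled.
 */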
2286
8a1c38d1 2287static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2288{
2289 struct rate_shaping_vars_per_vn m_rs_vn;
2290 struct fairness_vars_per_vn m_fair_vn;
2291 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2292 u16 vn_min_rate, vn_max_rate;
2293 int i;
2294
2295 /* If function is hidden - set min and max to zeroes */
2296 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2297 vn_min_rate = 0;
2298 vn_max_rate = 0;
2299
2300 } else {
2301 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2302 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2303 /* If fairness is enabled (not all min rates are zeroes) and
34f80b04 2304 if current min rate is zero - set it to 1.
33471629 2305 This is a requirement of the algorithm. */
8a1c38d1 2306 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2307 vn_min_rate = DEF_MIN_RATE;
2308 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2309 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2310 }
2311
8a1c38d1
EG
2312 DP(NETIF_MSG_IFUP,
2313 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2314 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2315
2316 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2317 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2318
2319 /* global vn counter - maximal Mbps for this vn */
2320 m_rs_vn.vn_counter.rate = vn_max_rate;
2321
2322 /* quota - number of bytes transmitted in this period */
2323 m_rs_vn.vn_counter.quota =
2324 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2325
8a1c38d1 2326 if (bp->vn_weight_sum) {
34f80b04
EG
2327 /* credit for each period of the fairness algorithm:
 2328	   number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
2329 vn_weight_sum should not be larger than 10000, thus
2330 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2331 than zero */
34f80b04 2332 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2333 max((u32)(vn_min_rate * (T_FAIR_COEF /
2334 (8 * bp->vn_weight_sum))),
2335 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2336 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2337 m_fair_vn.vn_credit_delta);
2338 }
2339
34f80b04
EG
2340 /* Store it to internal memory */
2341 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2342 REG_WR(bp, BAR_XSTRORM_INTMEM +
2343 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2344 ((u32 *)(&m_rs_vn))[i]);
2345
2346 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2347 REG_WR(bp, BAR_XSTRORM_INTMEM +
2348 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2349 ((u32 *)(&m_fair_vn))[i]);
2350}
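
/*
 * Worked example (illustrative): a vn capped at vn_max_rate = 2500 Mbps
 * with the 100 usec rate-shaping period gets
 *
 *	quota = 2500 * 100 / 8 = 31250 bytes
 *
 * per period, i.e. 2500 bits/usec over 100 usec, divided by 8.
 */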
2351
8a1c38d1 2352
c18487ee
YR
2353/* This function is called upon link interrupt */
2354static void bnx2x_link_attn(struct bnx2x *bp)
2355{
bb2a0f7a
YG
2356 /* Make sure that we are synced with the current statistics */
2357 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2358
c18487ee 2359 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2360
bb2a0f7a
YG
2361 if (bp->link_vars.link_up) {
2362
1c06328c
EG
2363 /* dropless flow control */
2364 if (CHIP_IS_E1H(bp)) {
2365 int port = BP_PORT(bp);
2366 u32 pause_enabled = 0;
2367
2368 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2369 pause_enabled = 1;
2370
2371 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2372 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2373 pause_enabled);
2374 }
2375
bb2a0f7a
YG
2376 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2377 struct host_port_stats *pstats;
2378
2379 pstats = bnx2x_sp(bp, port_stats);
2380 /* reset old bmac stats */
2381 memset(&(pstats->mac_stx[0]), 0,
2382 sizeof(struct mac_stx));
2383 }
2384 if ((bp->state == BNX2X_STATE_OPEN) ||
2385 (bp->state == BNX2X_STATE_DISABLED))
2386 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2387 }
2388
c18487ee
YR
2389 /* indicate link status */
2390 bnx2x_link_report(bp);
34f80b04
EG
2391
2392 if (IS_E1HMF(bp)) {
8a1c38d1 2393 int port = BP_PORT(bp);
34f80b04 2394 int func;
8a1c38d1 2395 int vn;
34f80b04
EG
2396
2397 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2398 if (vn == BP_E1HVN(bp))
2399 continue;
2400
8a1c38d1 2401 func = ((vn << 1) | port);
34f80b04
EG
2402
2403 /* Set the attention towards other drivers
2404 on the same port */
2405 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2406 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2407 }
34f80b04 2408
8a1c38d1
EG
2409 if (bp->link_vars.link_up) {
2410 int i;
2411
2412 /* Init rate shaping and fairness contexts */
2413 bnx2x_init_port_minmax(bp);
34f80b04 2414
34f80b04 2415 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2416 bnx2x_init_vn_minmax(bp, 2*vn + port);
2417
2418 /* Store it to internal memory */
2419 for (i = 0;
2420 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2421 REG_WR(bp, BAR_XSTRORM_INTMEM +
2422 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2423 ((u32 *)(&bp->cmng))[i]);
2424 }
34f80b04 2425 }
c18487ee 2426}
a2fbb9ea 2427
c18487ee
YR
2428static void bnx2x__link_status_update(struct bnx2x *bp)
2429{
2691d51d
EG
2430 int func = BP_FUNC(bp);
2431
c18487ee
YR
2432 if (bp->state != BNX2X_STATE_OPEN)
2433 return;
a2fbb9ea 2434
c18487ee 2435 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2436
bb2a0f7a
YG
2437 if (bp->link_vars.link_up)
2438 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2439 else
2440 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2441
2691d51d
EG
2442 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2443 bnx2x_calc_vn_weight_sum(bp);
2444
c18487ee
YR
2445 /* indicate link status */
2446 bnx2x_link_report(bp);
a2fbb9ea 2447}
a2fbb9ea 2448
34f80b04
EG
2449static void bnx2x_pmf_update(struct bnx2x *bp)
2450{
2451 int port = BP_PORT(bp);
2452 u32 val;
2453
2454 bp->port.pmf = 1;
2455 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2456
2457 /* enable nig attention */
2458 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2459 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2460 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2461
2462 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2463}
2464
c18487ee 2465/* end of Link */
a2fbb9ea
ET
2466
2467/* slow path */
2468
2469/*
2470 * General service functions
2471 */
2472
2691d51d
EG
2473/* send the MCP a request, block until there is a reply */
2474u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2475{
2476 int func = BP_FUNC(bp);
2477 u32 seq = ++bp->fw_seq;
2478 u32 rc = 0;
2479 u32 cnt = 1;
2480 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2481
2482 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2483 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2484
2485 do {
 2486		/* let the FW do its magic ... */
2487 msleep(delay);
2488
2489 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2490
 2491	/* Give the FW up to 2 seconds (200*10ms) */
2492 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2493
2494 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2495 cnt*delay, rc, seq);
2496
2497 /* is this a reply to our command? */
2498 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2499 rc &= FW_MSG_CODE_MASK;
2500 else {
2501 /* FW BUG! */
2502 BNX2X_ERR("FW failed to respond!\n");
2503 bnx2x_fw_dump(bp);
2504 rc = 0;
2505 }
2506
2507 return rc;
2508}
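
/*
 * Usage sketch (illustrative, not part of the driver): callers compare
 * the return value against the expected FW_MSG_CODE_* reply; 0 means the
 * MCP never echoed the sequence number back and must be treated as a
 * failure:
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *	if (rc == 0)
 *		return -EBUSY;	   (no reply from the MCP)
 */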
2509
2510static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2511static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2512static void bnx2x_set_rx_mode(struct net_device *dev);
2513
2514static void bnx2x_e1h_disable(struct bnx2x *bp)
2515{
2516 int port = BP_PORT(bp);
2517 int i;
2518
2519 bp->rx_mode = BNX2X_RX_MODE_NONE;
2520 bnx2x_set_storm_rx_mode(bp);
2521
2522 netif_tx_disable(bp->dev);
2523 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2524
2525 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2526
2527 bnx2x_set_mac_addr_e1h(bp, 0);
2528
2529 for (i = 0; i < MC_HASH_SIZE; i++)
2530 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2531
2532 netif_carrier_off(bp->dev);
2533}
2534
2535static void bnx2x_e1h_enable(struct bnx2x *bp)
2536{
2537 int port = BP_PORT(bp);
2538
2539 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2540
2541 bnx2x_set_mac_addr_e1h(bp, 1);
2542
 2543	/* Tx queues should only be re-enabled */
2544 netif_tx_wake_all_queues(bp->dev);
2545
2546 /* Initialize the receive filter. */
2547 bnx2x_set_rx_mode(bp->dev);
2548}
2549
2550static void bnx2x_update_min_max(struct bnx2x *bp)
2551{
2552 int port = BP_PORT(bp);
2553 int vn, i;
2554
2555 /* Init rate shaping and fairness contexts */
2556 bnx2x_init_port_minmax(bp);
2557
2558 bnx2x_calc_vn_weight_sum(bp);
2559
2560 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2561 bnx2x_init_vn_minmax(bp, 2*vn + port);
2562
2563 if (bp->port.pmf) {
2564 int func;
2565
2566 /* Set the attention towards other drivers on the same port */
2567 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2568 if (vn == BP_E1HVN(bp))
2569 continue;
2570
2571 func = ((vn << 1) | port);
2572 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2573 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2574 }
2575
2576 /* Store it to internal memory */
2577 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2578 REG_WR(bp, BAR_XSTRORM_INTMEM +
2579 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2580 ((u32 *)(&bp->cmng))[i]);
2581 }
2582}
2583
2584static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2585{
2586 int func = BP_FUNC(bp);
2587
2588 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2589 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2590
2591 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2592
2593 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2594 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2595 bp->state = BNX2X_STATE_DISABLED;
2596
2597 bnx2x_e1h_disable(bp);
2598 } else {
2599 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2600 bp->state = BNX2X_STATE_OPEN;
2601
2602 bnx2x_e1h_enable(bp);
2603 }
2604 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2605 }
2606 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2607
2608 bnx2x_update_min_max(bp);
2609 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2610 }
2611
2612 /* Report results to MCP */
2613 if (dcc_event)
2614 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2615 else
2616 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2617}
2618
a2fbb9ea
ET
2619/* the slow path queue is odd since completions arrive on the fastpath ring */
2620static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2621 u32 data_hi, u32 data_lo, int common)
2622{
34f80b04 2623 int func = BP_FUNC(bp);
a2fbb9ea 2624
34f80b04
EG
2625 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2626 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2627 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2628 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2629 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2630
2631#ifdef BNX2X_STOP_ON_ERROR
2632 if (unlikely(bp->panic))
2633 return -EIO;
2634#endif
2635
34f80b04 2636 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2637
2638 if (!bp->spq_left) {
2639 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2640 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2641 bnx2x_panic();
2642 return -EBUSY;
2643 }
f1410647 2644
a2fbb9ea
ET
 2645	/* CID needs port number to be encoded in it */
2646 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2647 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2648 HW_CID(bp, cid)));
2649 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2650 if (common)
2651 bp->spq_prod_bd->hdr.type |=
2652 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2653
2654 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2655 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2656
2657 bp->spq_left--;
2658
2659 if (bp->spq_prod_bd == bp->spq_last_bd) {
2660 bp->spq_prod_bd = bp->spq;
2661 bp->spq_prod_idx = 0;
2662 DP(NETIF_MSG_TIMER, "end of spq\n");
2663
2664 } else {
2665 bp->spq_prod_bd++;
2666 bp->spq_prod_idx++;
2667 }
2668
37dbbf32
EG
2669 /* Make sure that BD data is updated before writing the producer */
2670 wmb();
2671
34f80b04 2672 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2673 bp->spq_prod_idx);
2674
37dbbf32
EG
2675 mmiowb();
2676
34f80b04 2677 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2678 return 0;
2679}
2680
2681/* acquire split MCP access lock register */
4a37fb66 2682static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2683{
a2fbb9ea 2684 u32 i, j, val;
34f80b04 2685 int rc = 0;
a2fbb9ea
ET
2686
2687 might_sleep();
2688 i = 100;
2689 for (j = 0; j < i*10; j++) {
2690 val = (1UL << 31);
2691 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2692 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2693 if (val & (1L << 31))
2694 break;
2695
2696 msleep(5);
2697 }
a2fbb9ea 2698 if (!(val & (1L << 31))) {
19680c48 2699 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2700 rc = -EBUSY;
2701 }
2702
2703 return rc;
2704}
2705
4a37fb66
YG
2706/* release split MCP access lock register */
2707static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2708{
2709 u32 val = 0;
2710
2711 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2712}
2713
2714static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2715{
2716 struct host_def_status_block *def_sb = bp->def_status_blk;
2717 u16 rc = 0;
2718
2719 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2720 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2721 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2722 rc |= 1;
2723 }
2724 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2725 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2726 rc |= 2;
2727 }
2728 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2729 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2730 rc |= 4;
2731 }
2732 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2733 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2734 rc |= 8;
2735 }
2736 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2737 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2738 rc |= 16;
2739 }
2740 return rc;
2741}
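
/*
 * The returned bitmask tells the caller which default-SB indices moved:
 * bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM, bit 3 - XSTORM,
 * bit 4 - TSTORM.  bnx2x_sp_task() below, for instance, only runs the
 * attention handler when (status & 0x1) is set.
 */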
2742
2743/*
2744 * slow path service functions
2745 */
2746
2747static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2748{
34f80b04 2749 int port = BP_PORT(bp);
5c862848
EG
2750 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2751 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2752 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2753 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2754 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2755 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2756 u32 aeu_mask;
87942b46 2757 u32 nig_mask = 0;
a2fbb9ea 2758
a2fbb9ea
ET
2759 if (bp->attn_state & asserted)
2760 BNX2X_ERR("IGU ERROR\n");
2761
3fcaf2e5
EG
2762 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2763 aeu_mask = REG_RD(bp, aeu_addr);
2764
a2fbb9ea 2765 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2766 aeu_mask, asserted);
2767 aeu_mask &= ~(asserted & 0xff);
2768 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2769
3fcaf2e5
EG
2770 REG_WR(bp, aeu_addr, aeu_mask);
2771 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2772
3fcaf2e5 2773 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2774 bp->attn_state |= asserted;
3fcaf2e5 2775 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2776
2777 if (asserted & ATTN_HARD_WIRED_MASK) {
2778 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2779
a5e9a7cf
EG
2780 bnx2x_acquire_phy_lock(bp);
2781
877e9aa4 2782 /* save nig interrupt mask */
87942b46 2783 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2784 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2785
c18487ee 2786 bnx2x_link_attn(bp);
a2fbb9ea
ET
2787
2788 /* handle unicore attn? */
2789 }
2790 if (asserted & ATTN_SW_TIMER_4_FUNC)
2791 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2792
2793 if (asserted & GPIO_2_FUNC)
2794 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2795
2796 if (asserted & GPIO_3_FUNC)
2797 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2798
2799 if (asserted & GPIO_4_FUNC)
2800 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2801
2802 if (port == 0) {
2803 if (asserted & ATTN_GENERAL_ATTN_1) {
2804 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2805 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2806 }
2807 if (asserted & ATTN_GENERAL_ATTN_2) {
2808 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2809 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2810 }
2811 if (asserted & ATTN_GENERAL_ATTN_3) {
2812 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2813 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2814 }
2815 } else {
2816 if (asserted & ATTN_GENERAL_ATTN_4) {
2817 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2818 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2819 }
2820 if (asserted & ATTN_GENERAL_ATTN_5) {
2821 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2822 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2823 }
2824 if (asserted & ATTN_GENERAL_ATTN_6) {
2825 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2826 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2827 }
2828 }
2829
2830 } /* if hardwired */
2831
5c862848
EG
2832 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2833 asserted, hc_addr);
2834 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2835
2836 /* now set back the mask */
a5e9a7cf 2837 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2838 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2839 bnx2x_release_phy_lock(bp);
2840 }
a2fbb9ea
ET
2841}
2842
fd4ef40d
EG
2843static inline void bnx2x_fan_failure(struct bnx2x *bp)
2844{
2845 int port = BP_PORT(bp);
2846
2847 /* mark the failure */
2848 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2849 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2850 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2851 bp->link_params.ext_phy_config);
2852
2853 /* log the failure */
2854 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2855 " the driver to shutdown the card to prevent permanent"
2856 " damage. Please contact Dell Support for assistance\n",
2857 bp->dev->name);
2858}
877e9aa4 2859static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2860{
34f80b04 2861 int port = BP_PORT(bp);
877e9aa4 2862 int reg_offset;
4d295db0 2863 u32 val, swap_val, swap_override;
877e9aa4 2864
34f80b04
EG
2865 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2866 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2867
34f80b04 2868 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2869
2870 val = REG_RD(bp, reg_offset);
2871 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2872 REG_WR(bp, reg_offset, val);
2873
2874 BNX2X_ERR("SPIO5 hw attention\n");
2875
fd4ef40d 2876 /* Fan failure attention */
35b19ba5
EG
2877 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2878 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2879 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2880 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2881 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2882 /* The PHY reset is controlled by GPIO 1 */
2883 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2884 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2885 break;
2886
4d295db0
EG
2887 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2888 /* The PHY reset is controlled by GPIO 1 */
2889 /* fake the port number to cancel the swap done in
2890 set_gpio() */
2891 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2892 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2893 port = (swap_val && swap_override) ^ 1;
2894 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2895 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2896 break;
2897
877e9aa4
ET
2898 default:
2899 break;
2900 }
fd4ef40d 2901 bnx2x_fan_failure(bp);
877e9aa4 2902 }
34f80b04 2903
589abe3a
EG
2904 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2905 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2906 bnx2x_acquire_phy_lock(bp);
2907 bnx2x_handle_module_detect_int(&bp->link_params);
2908 bnx2x_release_phy_lock(bp);
2909 }
2910
34f80b04
EG
2911 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2912
2913 val = REG_RD(bp, reg_offset);
2914 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2915 REG_WR(bp, reg_offset, val);
2916
2917 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2918 (attn & HW_INTERRUT_ASSERT_SET_0));
2919 bnx2x_panic();
2920 }
877e9aa4
ET
2921}
2922
2923static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2924{
2925 u32 val;
2926
0626b899 2927 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2928
2929 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2930 BNX2X_ERR("DB hw attention 0x%x\n", val);
2931 /* DORQ discard attention */
2932 if (val & 0x2)
2933 BNX2X_ERR("FATAL error from DORQ\n");
2934 }
34f80b04
EG
2935
2936 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2937
2938 int port = BP_PORT(bp);
2939 int reg_offset;
2940
2941 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2942 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2943
2944 val = REG_RD(bp, reg_offset);
2945 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2946 REG_WR(bp, reg_offset, val);
2947
2948 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2949 (attn & HW_INTERRUT_ASSERT_SET_1));
2950 bnx2x_panic();
2951 }
877e9aa4
ET
2952}
2953
2954static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2955{
2956 u32 val;
2957
2958 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2959
2960 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2961 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2962 /* CFC error attention */
2963 if (val & 0x2)
2964 BNX2X_ERR("FATAL error from CFC\n");
2965 }
2966
2967 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2968
2969 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2970 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2971 /* RQ_USDMDP_FIFO_OVERFLOW */
2972 if (val & 0x18000)
2973 BNX2X_ERR("FATAL error from PXP\n");
2974 }
34f80b04
EG
2975
2976 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2977
2978 int port = BP_PORT(bp);
2979 int reg_offset;
2980
2981 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2982 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2983
2984 val = REG_RD(bp, reg_offset);
2985 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2986 REG_WR(bp, reg_offset, val);
2987
2988 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2989 (attn & HW_INTERRUT_ASSERT_SET_2));
2990 bnx2x_panic();
2991 }
877e9aa4
ET
2992}
2993
2994static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2995{
34f80b04
EG
2996 u32 val;
2997
877e9aa4
ET
2998 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2999
34f80b04
EG
3000 if (attn & BNX2X_PMF_LINK_ASSERT) {
3001 int func = BP_FUNC(bp);
3002
3003 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2691d51d
EG
3004 val = SHMEM_RD(bp, func_mb[func].drv_status);
3005 if (val & DRV_STATUS_DCC_EVENT_MASK)
3006 bnx2x_dcc_event(bp,
3007 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3008 bnx2x__link_status_update(bp);
2691d51d 3009 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3010 bnx2x_pmf_update(bp);
3011
3012 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3013
3014 BNX2X_ERR("MC assert!\n");
3015 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3016 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3017 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3018 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3019 bnx2x_panic();
3020
3021 } else if (attn & BNX2X_MCP_ASSERT) {
3022
3023 BNX2X_ERR("MCP assert!\n");
3024 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3025 bnx2x_fw_dump(bp);
877e9aa4
ET
3026
3027 } else
3028 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3029 }
3030
3031 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3032 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3033 if (attn & BNX2X_GRC_TIMEOUT) {
3034 val = CHIP_IS_E1H(bp) ?
3035 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3036 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3037 }
3038 if (attn & BNX2X_GRC_RSV) {
3039 val = CHIP_IS_E1H(bp) ?
3040 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3041 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3042 }
877e9aa4 3043 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3044 }
3045}
3046
3047static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3048{
a2fbb9ea
ET
3049 struct attn_route attn;
3050 struct attn_route group_mask;
34f80b04 3051 int port = BP_PORT(bp);
877e9aa4 3052 int index;
a2fbb9ea
ET
3053 u32 reg_addr;
3054 u32 val;
3fcaf2e5 3055 u32 aeu_mask;
a2fbb9ea
ET
3056
3057 /* need to take HW lock because MCP or other port might also
3058 try to handle this event */
4a37fb66 3059 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
3060
3061 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3062 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3063 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3064 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3065 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3066 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3067
3068 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3069 if (deasserted & (1 << index)) {
3070 group_mask = bp->attn_group[index];
3071
34f80b04
EG
3072 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3073 index, group_mask.sig[0], group_mask.sig[1],
3074 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3075
877e9aa4
ET
3076 bnx2x_attn_int_deasserted3(bp,
3077 attn.sig[3] & group_mask.sig[3]);
3078 bnx2x_attn_int_deasserted1(bp,
3079 attn.sig[1] & group_mask.sig[1]);
3080 bnx2x_attn_int_deasserted2(bp,
3081 attn.sig[2] & group_mask.sig[2]);
3082 bnx2x_attn_int_deasserted0(bp,
3083 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3084
a2fbb9ea
ET
3085 if ((attn.sig[0] & group_mask.sig[0] &
3086 HW_PRTY_ASSERT_SET_0) ||
3087 (attn.sig[1] & group_mask.sig[1] &
3088 HW_PRTY_ASSERT_SET_1) ||
3089 (attn.sig[2] & group_mask.sig[2] &
3090 HW_PRTY_ASSERT_SET_2))
6378c025 3091 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
3092 }
3093 }
3094
4a37fb66 3095 bnx2x_release_alr(bp);
a2fbb9ea 3096
5c862848 3097 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3098
3099 val = ~deasserted;
3fcaf2e5
EG
3100 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3101 val, reg_addr);
5c862848 3102 REG_WR(bp, reg_addr, val);
a2fbb9ea 3103
a2fbb9ea 3104 if (~bp->attn_state & deasserted)
3fcaf2e5 3105 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3106
3107 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3108 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3109
3fcaf2e5
EG
3110 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3111 aeu_mask = REG_RD(bp, reg_addr);
3112
3113 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3114 aeu_mask, deasserted);
3115 aeu_mask |= (deasserted & 0xff);
3116 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3117
3fcaf2e5
EG
3118 REG_WR(bp, reg_addr, aeu_mask);
3119 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3120
3121 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3122 bp->attn_state &= ~deasserted;
3123 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3124}
3125
3126static void bnx2x_attn_int(struct bnx2x *bp)
3127{
3128 /* read local copy of bits */
68d59484
EG
3129 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3130 attn_bits);
3131 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3132 attn_bits_ack);
a2fbb9ea
ET
3133 u32 attn_state = bp->attn_state;
3134
3135 /* look for changed bits */
3136 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3137 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3138
3139 DP(NETIF_MSG_HW,
3140 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3141 attn_bits, attn_ack, asserted, deasserted);
3142
3143 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3144 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3145
3146 /* handle bits that were raised */
3147 if (asserted)
3148 bnx2x_attn_int_asserted(bp, asserted);
3149
3150 if (deasserted)
3151 bnx2x_attn_int_deasserted(bp, deasserted);
3152}
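
/*
 * Worked example (illustrative): with attn_bits = 0x5, attn_ack = 0x4
 * and attn_state = 0x4:
 *
 *	asserted   =  0x5 & ~0x4 & ~0x4 = 0x1    (newly raised, not acked)
 *	deasserted = ~0x5 &  0x4 &  0x4 = 0x0
 *
 * so bit 0 goes through bnx2x_attn_int_asserted().  Once it is acked
 * and drops (attn_bits = 0x4, attn_ack = 0x5, attn_state = 0x5), the
 * same formulas yield deasserted = 0x1.
 */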
3153
3154static void bnx2x_sp_task(struct work_struct *work)
3155{
1cf167f2 3156 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3157 u16 status;
3158
34f80b04 3159
a2fbb9ea
ET
3160 /* Return here if interrupt is disabled */
3161 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3162 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3163 return;
3164 }
3165
3166 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3167/* if (status == 0) */
3168/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3169
3196a88a 3170 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3171
877e9aa4
ET
3172 /* HW attentions */
3173 if (status & 0x1)
a2fbb9ea 3174 bnx2x_attn_int(bp);
a2fbb9ea 3175
68d59484 3176 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3177 IGU_INT_NOP, 1);
3178 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3179 IGU_INT_NOP, 1);
3180 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3181 IGU_INT_NOP, 1);
3182 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3183 IGU_INT_NOP, 1);
3184 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3185 IGU_INT_ENABLE, 1);
877e9aa4 3186
a2fbb9ea
ET
3187}
3188
3189static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3190{
3191 struct net_device *dev = dev_instance;
3192 struct bnx2x *bp = netdev_priv(dev);
3193
3194 /* Return here if interrupt is disabled */
3195 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3196 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3197 return IRQ_HANDLED;
3198 }
3199
8d9c5f34 3200 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3201
3202#ifdef BNX2X_STOP_ON_ERROR
3203 if (unlikely(bp->panic))
3204 return IRQ_HANDLED;
3205#endif
3206
1cf167f2 3207 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3208
3209 return IRQ_HANDLED;
3210}
3211
3212/* end of slow path */
3213
3214/* Statistics */
3215
3216/****************************************************************************
3217* Macros
3218****************************************************************************/
3219
a2fbb9ea
ET
3220/* sum[hi:lo] += add[hi:lo] */
3221#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3222 do { \
3223 s_lo += a_lo; \
f5ba6772 3224 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3225 } while (0)
3226
3227/* difference = minuend - subtrahend */
3228#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3229 do { \
bb2a0f7a
YG
3230 if (m_lo < s_lo) { \
3231 /* underflow */ \
a2fbb9ea 3232 d_hi = m_hi - s_hi; \
bb2a0f7a 3233 if (d_hi > 0) { \
6378c025 3234 /* we can 'loan' 1 */ \
a2fbb9ea
ET
3235 d_hi--; \
3236 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3237 } else { \
6378c025 3238 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3239 d_hi = 0; \
3240 d_lo = 0; \
3241 } \
bb2a0f7a
YG
3242 } else { \
3243 /* m_lo >= s_lo */ \
a2fbb9ea 3244 if (m_hi < s_hi) { \
bb2a0f7a
YG
3245 d_hi = 0; \
3246 d_lo = 0; \
3247 } else { \
6378c025 3248 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3249 d_hi = m_hi - s_hi; \
3250 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3251 } \
3252 } \
3253 } while (0)
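
/*
 * Standalone demo (illustrative): the macros do 64-bit arithmetic on
 * split hi/lo u32 pairs.  Adding 1 to 0x00000000ffffffff carries into
 * the high word:
 *
 *	u32 s_hi = 0, s_lo = 0xffffffff;
 *	ADD_64(s_hi, 0, s_lo, 1);
 *	(now s_hi == 1 and s_lo == 0)
 *
 * DIFF_64 performs the matching borrow and clamps the result to 0
 * instead of wrapping when the minuend is smaller than the subtrahend.
 */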
3254
bb2a0f7a 3255#define UPDATE_STAT64(s, t) \
a2fbb9ea 3256 do { \
bb2a0f7a
YG
3257 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3258 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3259 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3260 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3261 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3262 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3263 } while (0)
3264
bb2a0f7a 3265#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3266 do { \
bb2a0f7a
YG
3267 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3268 diff.lo, new->s##_lo, old->s##_lo); \
3269 ADD_64(estats->t##_hi, diff.hi, \
3270 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3271 } while (0)
3272
3273/* sum[hi:lo] += add */
3274#define ADD_EXTEND_64(s_hi, s_lo, a) \
3275 do { \
3276 s_lo += a; \
3277 s_hi += (s_lo < a) ? 1 : 0; \
3278 } while (0)
3279
bb2a0f7a 3280#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3281 do { \
bb2a0f7a
YG
3282 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3283 pstats->mac_stx[1].s##_lo, \
3284 new->s); \
a2fbb9ea
ET
3285 } while (0)
3286
bb2a0f7a 3287#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3288 do { \
4781bfad
EG
3289 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3290 old_tclient->s = tclient->s; \
de832a55
EG
3291 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3292 } while (0)
3293
3294#define UPDATE_EXTEND_USTAT(s, t) \
3295 do { \
3296 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3297 old_uclient->s = uclient->s; \
3298 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3299 } while (0)
3300
3301#define UPDATE_EXTEND_XSTAT(s, t) \
3302 do { \
4781bfad
EG
3303 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3304 old_xclient->s = xclient->s; \
de832a55
EG
3305 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3306 } while (0)
3307
3308/* minuend -= subtrahend */
3309#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3310 do { \
3311 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3312 } while (0)
3313
3314/* minuend[hi:lo] -= subtrahend */
3315#define SUB_EXTEND_64(m_hi, m_lo, s) \
3316 do { \
3317 SUB_64(m_hi, 0, m_lo, s); \
3318 } while (0)
3319
3320#define SUB_EXTEND_USTAT(s, t) \
3321 do { \
3322 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3323 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3324 } while (0)
3325
3326/*
3327 * General service functions
3328 */
3329
3330static inline long bnx2x_hilo(u32 *hiref)
3331{
3332 u32 lo = *(hiref + 1);
3333#if (BITS_PER_LONG == 64)
3334 u32 hi = *hiref;
3335
3336 return HILO_U64(hi, lo);
3337#else
3338 return lo;
3339#endif
3340}
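
/*
 * Example (illustrative): the statistics blocks store 64-bit counters
 * as adjacent {hi, lo} u32 pairs, so for u32 v[2] = { 0x1, 0x23456789 },
 * bnx2x_hilo(v) returns the combined value 0x123456789 on 64-bit
 * kernels but only the low word, 0x23456789, when BITS_PER_LONG == 32.
 */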
3341
3342/*
3343 * Init service functions
3344 */
3345
bb2a0f7a
YG
3346static void bnx2x_storm_stats_post(struct bnx2x *bp)
3347{
3348 if (!bp->stats_pending) {
3349 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3350 int i, rc;
bb2a0f7a
YG
3351
3352 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3353 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3354 for_each_queue(bp, i)
3355 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3356
3357 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3358 ((u32 *)&ramrod_data)[1],
3359 ((u32 *)&ramrod_data)[0], 0);
3360 if (rc == 0) {
 3361			/* stats ramrod has its own slot on the spq */
3362 bp->spq_left++;
3363 bp->stats_pending = 1;
3364 }
3365 }
3366}
3367
3368static void bnx2x_stats_init(struct bnx2x *bp)
3369{
3370 int port = BP_PORT(bp);
de832a55 3371 int i;
bb2a0f7a 3372
de832a55 3373 bp->stats_pending = 0;
bb2a0f7a
YG
3374 bp->executer_idx = 0;
3375 bp->stats_counter = 0;
3376
3377 /* port stats */
3378 if (!BP_NOMCP(bp))
3379 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3380 else
3381 bp->port.port_stx = 0;
3382 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3383
3384 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3385 bp->port.old_nig_stats.brb_discard =
3386 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3387 bp->port.old_nig_stats.brb_truncate =
3388 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3389 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3390 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3391 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3392 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3393
3394 /* function stats */
de832a55
EG
3395 for_each_queue(bp, i) {
3396 struct bnx2x_fastpath *fp = &bp->fp[i];
3397
3398 memset(&fp->old_tclient, 0,
3399 sizeof(struct tstorm_per_client_stats));
3400 memset(&fp->old_uclient, 0,
3401 sizeof(struct ustorm_per_client_stats));
3402 memset(&fp->old_xclient, 0,
3403 sizeof(struct xstorm_per_client_stats));
3404 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3405 }
3406
bb2a0f7a 3407 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
bb2a0f7a
YG
3408 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3409
3410 bp->stats_state = STATS_STATE_DISABLED;
3411 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3412 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3413}
3414
3415static void bnx2x_hw_stats_post(struct bnx2x *bp)
3416{
3417 struct dmae_command *dmae = &bp->stats_dmae;
3418 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3419
3420 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3421 if (CHIP_REV_IS_SLOW(bp))
3422 return;
bb2a0f7a
YG
3423
3424 /* loader */
3425 if (bp->executer_idx) {
3426 int loader_idx = PMF_DMAE_C(bp);
3427
3428 memset(dmae, 0, sizeof(struct dmae_command));
3429
3430 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3431 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3432 DMAE_CMD_DST_RESET |
3433#ifdef __BIG_ENDIAN
3434 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3435#else
3436 DMAE_CMD_ENDIANITY_DW_SWAP |
3437#endif
3438 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3439 DMAE_CMD_PORT_0) |
3440 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3441 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3442 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3443 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3444 sizeof(struct dmae_command) *
3445 (loader_idx + 1)) >> 2;
3446 dmae->dst_addr_hi = 0;
3447 dmae->len = sizeof(struct dmae_command) >> 2;
3448 if (CHIP_IS_E1(bp))
3449 dmae->len--;
3450 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3451 dmae->comp_addr_hi = 0;
3452 dmae->comp_val = 1;
3453
3454 *stats_comp = 0;
3455 bnx2x_post_dmae(bp, dmae, loader_idx);
3456
3457 } else if (bp->func_stx) {
3458 *stats_comp = 0;
3459 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3460 }
3461}
3462
3463static int bnx2x_stats_comp(struct bnx2x *bp)
3464{
3465 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3466 int cnt = 10;
3467
3468 might_sleep();
3469 while (*stats_comp != DMAE_COMP_VAL) {
3470 if (!cnt) {
3471 BNX2X_ERR("timeout waiting for stats to finish\n");
3472 break;
3473 }
3474 cnt--;
12469401 3475 msleep(1);
3476 }
3477 return 1;
3478}
3479
3480/*
3481 * Statistics service functions
3482 */
3483
3484static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3485{
3486 struct dmae_command *dmae;
3487 u32 opcode;
3488 int loader_idx = PMF_DMAE_C(bp);
3489 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3490
3491 /* sanity */
3492 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3493 BNX2X_ERR("BUG!\n");
3494 return;
3495 }
3496
3497 bp->executer_idx = 0;
3498
3499 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3500 DMAE_CMD_C_ENABLE |
3501 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3502#ifdef __BIG_ENDIAN
3503 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3504#else
3505 DMAE_CMD_ENDIANITY_DW_SWAP |
3506#endif
3507 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3508 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3509
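	/* the port stats block is wider than a single DMAE transfer
	 * (DMAE_LEN32_RD_MAX dwords), so it is read back in two commands:
	 * one of maximum length and one carrying the remainder */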
3510 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3511 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3512 dmae->src_addr_lo = bp->port.port_stx >> 2;
3513 dmae->src_addr_hi = 0;
3514 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3515 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3516 dmae->len = DMAE_LEN32_RD_MAX;
3517 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3518 dmae->comp_addr_hi = 0;
3519 dmae->comp_val = 1;
3520
3521 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3522 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3523 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3524 dmae->src_addr_hi = 0;
3525 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3526 DMAE_LEN32_RD_MAX * 4);
3527 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3528 DMAE_LEN32_RD_MAX * 4);
3529 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3530 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3531 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3532 dmae->comp_val = DMAE_COMP_VAL;
3533
3534 *stats_comp = 0;
3535 bnx2x_hw_stats_post(bp);
3536 bnx2x_stats_comp(bp);
3537}
3538
3539static void bnx2x_port_stats_init(struct bnx2x *bp)
3540{
3541 struct dmae_command *dmae;
34f80b04 3542 int port = BP_PORT(bp);
bb2a0f7a 3543 int vn = BP_E1HVN(bp);
a2fbb9ea 3544 u32 opcode;
bb2a0f7a 3545 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3546 u32 mac_addr;
3547 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3548
3549 /* sanity */
3550 if (!bp->link_vars.link_up || !bp->port.pmf) {
3551 BNX2X_ERR("BUG!\n");
3552 return;
3553 }
3554
3555 bp->executer_idx = 0;
3556
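	/* build the DMAE program for this port: publish the host copies of
	 * the port/function stats to the MCP scratchpad first, then read
	 * the MAC (BMAC or EMAC) and NIG hardware counters back into host
	 * memory; the program is run later by bnx2x_hw_stats_post() */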
3557 /* MCP */
3558 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3559 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3560 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3561#ifdef __BIG_ENDIAN
bb2a0f7a 3562 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3563#else
bb2a0f7a 3564 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3565#endif
3566 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3567 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3568
bb2a0f7a 3569 if (bp->port.port_stx) {
3570
3571 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3572 dmae->opcode = opcode;
3573 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3574 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3575 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3576 dmae->dst_addr_hi = 0;
3577 dmae->len = sizeof(struct host_port_stats) >> 2;
3578 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3579 dmae->comp_addr_hi = 0;
3580 dmae->comp_val = 1;
3581 }
3582
3583 if (bp->func_stx) {
3584
3585 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3586 dmae->opcode = opcode;
3587 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3588 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3589 dmae->dst_addr_lo = bp->func_stx >> 2;
3590 dmae->dst_addr_hi = 0;
3591 dmae->len = sizeof(struct host_func_stats) >> 2;
3592 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3593 dmae->comp_addr_hi = 0;
3594 dmae->comp_val = 1;
3595 }
3596
bb2a0f7a 3597 /* MAC */
3598 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3599 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3600 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3601#ifdef __BIG_ENDIAN
3602 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3603#else
3604 DMAE_CMD_ENDIANITY_DW_SWAP |
3605#endif
3606 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3607 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3608
c18487ee 3609 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3610
3611 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3612 NIG_REG_INGRESS_BMAC0_MEM);
3613
3614 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3615 BIGMAC_REGISTER_TX_STAT_GTBYT */
3616 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3617 dmae->opcode = opcode;
3618 dmae->src_addr_lo = (mac_addr +
3619 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3620 dmae->src_addr_hi = 0;
3621 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3622 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3623 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3624 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3625 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3626 dmae->comp_addr_hi = 0;
3627 dmae->comp_val = 1;
3628
3629 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3630 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3631 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3632 dmae->opcode = opcode;
3633 dmae->src_addr_lo = (mac_addr +
3634 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3635 dmae->src_addr_hi = 0;
3636 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3637 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3638 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3639 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3640 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3641 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3642 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3643 dmae->comp_addr_hi = 0;
3644 dmae->comp_val = 1;
3645
c18487ee 3646 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3647
3648 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3649
3650 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3651 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3652 dmae->opcode = opcode;
3653 dmae->src_addr_lo = (mac_addr +
3654 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3655 dmae->src_addr_hi = 0;
3656 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3657 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3658 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3659 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3660 dmae->comp_addr_hi = 0;
3661 dmae->comp_val = 1;
3662
3663 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3664 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3665 dmae->opcode = opcode;
3666 dmae->src_addr_lo = (mac_addr +
3667 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3668 dmae->src_addr_hi = 0;
3669 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3670 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3671 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3672 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3673 dmae->len = 1;
3674 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3675 dmae->comp_addr_hi = 0;
3676 dmae->comp_val = 1;
3677
3678 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3679 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3680 dmae->opcode = opcode;
3681 dmae->src_addr_lo = (mac_addr +
3682 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3683 dmae->src_addr_hi = 0;
3684 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3685 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3686 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3687 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3688 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3689 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3690 dmae->comp_addr_hi = 0;
3691 dmae->comp_val = 1;
3692 }
3693
3694 /* NIG */
3695 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3696 dmae->opcode = opcode;
3697 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3698 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3699 dmae->src_addr_hi = 0;
3700 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3701 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3702 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3703 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3704 dmae->comp_addr_hi = 0;
3705 dmae->comp_val = 1;
3706
3707 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3708 dmae->opcode = opcode;
3709 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3710 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3711 dmae->src_addr_hi = 0;
3712 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3713 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3714 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3715 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3716 dmae->len = (2*sizeof(u32)) >> 2;
3717 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3718 dmae->comp_addr_hi = 0;
3719 dmae->comp_val = 1;
3720
3721 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3722 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3723 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3724 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3725#ifdef __BIG_ENDIAN
3726 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3727#else
3728 DMAE_CMD_ENDIANITY_DW_SWAP |
3729#endif
3730 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3731 (vn << DMAE_CMD_E1HVN_SHIFT));
3732 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3733 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3734 dmae->src_addr_hi = 0;
3735 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3736 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3737 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3738 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3739 dmae->len = (2*sizeof(u32)) >> 2;
3740 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3741 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3742 dmae->comp_val = DMAE_COMP_VAL;
3743
3744 *stats_comp = 0;
3745}
3746
bb2a0f7a 3747static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3748{
3749 struct dmae_command *dmae = &bp->stats_dmae;
3750 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3751
3752 /* sanity */
3753 if (!bp->func_stx) {
3754 BNX2X_ERR("BUG!\n");
3755 return;
3756 }
a2fbb9ea 3757
3758 bp->executer_idx = 0;
3759 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3760
3761 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3762 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3763 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3764#ifdef __BIG_ENDIAN
3765 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3766#else
3767 DMAE_CMD_ENDIANITY_DW_SWAP |
3768#endif
3769 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3770 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3771 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3772 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3773 dmae->dst_addr_lo = bp->func_stx >> 2;
3774 dmae->dst_addr_hi = 0;
3775 dmae->len = sizeof(struct host_func_stats) >> 2;
3776 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3777 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3778 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3779
3780 *stats_comp = 0;
3781}
a2fbb9ea 3782
3783static void bnx2x_stats_start(struct bnx2x *bp)
3784{
3785 if (bp->port.pmf)
3786 bnx2x_port_stats_init(bp);
3787
3788 else if (bp->func_stx)
3789 bnx2x_func_stats_init(bp);
3790
3791 bnx2x_hw_stats_post(bp);
3792 bnx2x_storm_stats_post(bp);
3793}
3794
3795static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3796{
3797 bnx2x_stats_comp(bp);
3798 bnx2x_stats_pmf_update(bp);
3799 bnx2x_stats_start(bp);
3800}
3801
3802static void bnx2x_stats_restart(struct bnx2x *bp)
3803{
3804 bnx2x_stats_comp(bp);
3805 bnx2x_stats_start(bp);
3806}
3807
3808static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3809{
3810 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3811 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3812 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3813 struct {
3814 u32 lo;
3815 u32 hi;
3816 } diff;
3817
3818 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3819 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3820 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3821 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3822 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3823 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3824 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3825 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3826 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3827 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3828 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3829 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3830 UPDATE_STAT64(tx_stat_gt127,
3831 tx_stat_etherstatspkts65octetsto127octets);
3832 UPDATE_STAT64(tx_stat_gt255,
3833 tx_stat_etherstatspkts128octetsto255octets);
3834 UPDATE_STAT64(tx_stat_gt511,
3835 tx_stat_etherstatspkts256octetsto511octets);
3836 UPDATE_STAT64(tx_stat_gt1023,
3837 tx_stat_etherstatspkts512octetsto1023octets);
3838 UPDATE_STAT64(tx_stat_gt1518,
3839 tx_stat_etherstatspkts1024octetsto1522octets);
3840 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3841 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3842 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3843 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3844 UPDATE_STAT64(tx_stat_gterr,
3845 tx_stat_dot3statsinternalmactransmiterrors);
3846 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3847
3848 estats->pause_frames_received_hi =
3849 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3850 estats->pause_frames_received_lo =
3851 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3852
3853 estats->pause_frames_sent_hi =
3854 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3855 estats->pause_frames_sent_lo =
3856 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3857}
3858
3859static void bnx2x_emac_stats_update(struct bnx2x *bp)
3860{
3861 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3862 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3863 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3864
3865 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3866 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3867 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3868 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3869 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3870 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3871 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3872 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3873 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3874 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3875 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3876 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3877 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3878 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3879 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3880 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3881 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3882 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3883 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3884 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3885 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3886 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3887 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3888 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3889 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3890 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3891 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3892 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3893 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3894 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3895 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3896
3897 estats->pause_frames_received_hi =
3898 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3899 estats->pause_frames_received_lo =
3900 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3901 ADD_64(estats->pause_frames_received_hi,
3902 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3903 estats->pause_frames_received_lo,
3904 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3905
3906 estats->pause_frames_sent_hi =
3907 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3908 estats->pause_frames_sent_lo =
3909 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3910 ADD_64(estats->pause_frames_sent_hi,
3911 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3912 estats->pause_frames_sent_lo,
3913 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3914}
3915
3916static int bnx2x_hw_stats_update(struct bnx2x *bp)
3917{
3918 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3919 struct nig_stats *old = &(bp->port.old_nig_stats);
3920 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3921 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3922 struct {
3923 u32 lo;
3924 u32 hi;
3925 } diff;
de832a55 3926 u32 nig_timer_max;
3927
3928 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3929 bnx2x_bmac_stats_update(bp);
3930
3931 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3932 bnx2x_emac_stats_update(bp);
3933
3934 else { /* unreached */
c3eefaf6 3935 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3936 return -1;
3937 }
a2fbb9ea 3938
3939 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3940 new->brb_discard - old->brb_discard);
3941 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3942 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3943
3944 UPDATE_STAT64_NIG(egress_mac_pkt0,
3945 etherstatspkts1024octetsto1522octets);
3946 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3947
bb2a0f7a 3948 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3949
3950 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3951 sizeof(struct mac_stx));
3952 estats->brb_drop_hi = pstats->brb_drop_hi;
3953 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3954
bb2a0f7a 3955 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3956
3957 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3958 if (nig_timer_max != estats->nig_timer_max) {
3959 estats->nig_timer_max = nig_timer_max;
3960 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3961 }
3962
bb2a0f7a 3963 return 0;
3964}
3965
bb2a0f7a 3966static int bnx2x_storm_stats_update(struct bnx2x *bp)
3967{
3968 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3969 struct tstorm_per_port_stats *tport =
de832a55 3970 &stats->tstorm_common.port_statistics;
3971 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3972 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3973 int i;
3974
3975 memset(&(fstats->total_bytes_received_hi), 0,
3976 sizeof(struct host_func_stats) - 2*sizeof(u32));
3977 estats->error_bytes_received_hi = 0;
3978 estats->error_bytes_received_lo = 0;
3979 estats->etherstatsoverrsizepkts_hi = 0;
3980 estats->etherstatsoverrsizepkts_lo = 0;
3981 estats->no_buff_discard_hi = 0;
3982 estats->no_buff_discard_lo = 0;
a2fbb9ea 3983
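	/* each storm firmware increments its per-client stats_counter when
	 * it posts a fresh snapshot; a mismatch against the driver's
	 * stats_counter means the snapshot is stale and the update is
	 * abandoned until the next attempt */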
ca00392c 3984 for_each_rx_queue(bp, i) {
3985 struct bnx2x_fastpath *fp = &bp->fp[i];
3986 int cl_id = fp->cl_id;
3987 struct tstorm_per_client_stats *tclient =
3988 &stats->tstorm_common.client_statistics[cl_id];
3989 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3990 struct ustorm_per_client_stats *uclient =
3991 &stats->ustorm_common.client_statistics[cl_id];
3992 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3993 struct xstorm_per_client_stats *xclient =
3994 &stats->xstorm_common.client_statistics[cl_id];
3995 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3996 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3997 u32 diff;
3998
3999 /* are storm stats valid? */
4000 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 4001 bp->stats_counter) {
4002 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4003 " xstorm counter (%d) != stats_counter (%d)\n",
4004 i, xclient->stats_counter, bp->stats_counter);
4005 return -1;
4006 }
4007 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 4008 bp->stats_counter) {
4009 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4010 " tstorm counter (%d) != stats_counter (%d)\n",
4011 i, tclient->stats_counter, bp->stats_counter);
4012 return -2;
4013 }
4014 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4015 bp->stats_counter) {
4016 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4017 " ustorm counter (%d) != stats_counter (%d)\n",
4018 i, uclient->stats_counter, bp->stats_counter);
4019 return -4;
4020 }
a2fbb9ea 4021
de832a55 4022 qstats->total_bytes_received_hi =
ca00392c 4023 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 4024 qstats->total_bytes_received_lo =
4025 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4026
4027 ADD_64(qstats->total_bytes_received_hi,
4028 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4029 qstats->total_bytes_received_lo,
4030 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4031
4032 ADD_64(qstats->total_bytes_received_hi,
4033 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4034 qstats->total_bytes_received_lo,
4035 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4036
4037 qstats->valid_bytes_received_hi =
4038 qstats->total_bytes_received_hi;
de832a55 4039 qstats->valid_bytes_received_lo =
ca00392c 4040 qstats->total_bytes_received_lo;
bb2a0f7a 4041
de832a55 4042 qstats->error_bytes_received_hi =
bb2a0f7a 4043 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 4044 qstats->error_bytes_received_lo =
bb2a0f7a 4045 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4046
4047 ADD_64(qstats->total_bytes_received_hi,
4048 qstats->error_bytes_received_hi,
4049 qstats->total_bytes_received_lo,
4050 qstats->error_bytes_received_lo);
4051
4052 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4053 total_unicast_packets_received);
4054 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4055 total_multicast_packets_received);
4056 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4057 total_broadcast_packets_received);
4058 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4059 etherstatsoverrsizepkts);
4060 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4061
4062 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4063 total_unicast_packets_received);
4064 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4065 total_multicast_packets_received);
4066 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4067 total_broadcast_packets_received);
4068 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4069 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4070 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4071
4072 qstats->total_bytes_transmitted_hi =
ca00392c 4073 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4074 qstats->total_bytes_transmitted_lo =
4075 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4076
4077 ADD_64(qstats->total_bytes_transmitted_hi,
4078 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4079 qstats->total_bytes_transmitted_lo,
4080 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4081
4082 ADD_64(qstats->total_bytes_transmitted_hi,
4083 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4084 qstats->total_bytes_transmitted_lo,
4085 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4086
4087 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4088 total_unicast_packets_transmitted);
4089 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4090 total_multicast_packets_transmitted);
4091 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4092 total_broadcast_packets_transmitted);
4093
4094 old_tclient->checksum_discard = tclient->checksum_discard;
4095 old_tclient->ttl0_discard = tclient->ttl0_discard;
4096
4097 ADD_64(fstats->total_bytes_received_hi,
4098 qstats->total_bytes_received_hi,
4099 fstats->total_bytes_received_lo,
4100 qstats->total_bytes_received_lo);
4101 ADD_64(fstats->total_bytes_transmitted_hi,
4102 qstats->total_bytes_transmitted_hi,
4103 fstats->total_bytes_transmitted_lo,
4104 qstats->total_bytes_transmitted_lo);
4105 ADD_64(fstats->total_unicast_packets_received_hi,
4106 qstats->total_unicast_packets_received_hi,
4107 fstats->total_unicast_packets_received_lo,
4108 qstats->total_unicast_packets_received_lo);
4109 ADD_64(fstats->total_multicast_packets_received_hi,
4110 qstats->total_multicast_packets_received_hi,
4111 fstats->total_multicast_packets_received_lo,
4112 qstats->total_multicast_packets_received_lo);
4113 ADD_64(fstats->total_broadcast_packets_received_hi,
4114 qstats->total_broadcast_packets_received_hi,
4115 fstats->total_broadcast_packets_received_lo,
4116 qstats->total_broadcast_packets_received_lo);
4117 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4118 qstats->total_unicast_packets_transmitted_hi,
4119 fstats->total_unicast_packets_transmitted_lo,
4120 qstats->total_unicast_packets_transmitted_lo);
4121 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4122 qstats->total_multicast_packets_transmitted_hi,
4123 fstats->total_multicast_packets_transmitted_lo,
4124 qstats->total_multicast_packets_transmitted_lo);
4125 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4126 qstats->total_broadcast_packets_transmitted_hi,
4127 fstats->total_broadcast_packets_transmitted_lo,
4128 qstats->total_broadcast_packets_transmitted_lo);
4129 ADD_64(fstats->valid_bytes_received_hi,
4130 qstats->valid_bytes_received_hi,
4131 fstats->valid_bytes_received_lo,
4132 qstats->valid_bytes_received_lo);
4133
4134 ADD_64(estats->error_bytes_received_hi,
4135 qstats->error_bytes_received_hi,
4136 estats->error_bytes_received_lo,
4137 qstats->error_bytes_received_lo);
4138 ADD_64(estats->etherstatsoverrsizepkts_hi,
4139 qstats->etherstatsoverrsizepkts_hi,
4140 estats->etherstatsoverrsizepkts_lo,
4141 qstats->etherstatsoverrsizepkts_lo);
4142 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4143 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4144 }
4145
4146 ADD_64(fstats->total_bytes_received_hi,
4147 estats->rx_stat_ifhcinbadoctets_hi,
4148 fstats->total_bytes_received_lo,
4149 estats->rx_stat_ifhcinbadoctets_lo);
4150
4151 memcpy(estats, &(fstats->total_bytes_received_hi),
4152 sizeof(struct host_func_stats) - 2*sizeof(u32));
4153
4154 ADD_64(estats->etherstatsoverrsizepkts_hi,
4155 estats->rx_stat_dot3statsframestoolong_hi,
4156 estats->etherstatsoverrsizepkts_lo,
4157 estats->rx_stat_dot3statsframestoolong_lo);
4158 ADD_64(estats->error_bytes_received_hi,
4159 estats->rx_stat_ifhcinbadoctets_hi,
4160 estats->error_bytes_received_lo,
4161 estats->rx_stat_ifhcinbadoctets_lo);
4162
4163 if (bp->port.pmf) {
4164 estats->mac_filter_discard =
4165 le32_to_cpu(tport->mac_filter_discard);
4166 estats->xxoverflow_discard =
4167 le32_to_cpu(tport->xxoverflow_discard);
4168 estats->brb_truncate_discard =
bb2a0f7a 4169 le32_to_cpu(tport->brb_truncate_discard);
4170 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4171 }
4172
4173 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4174
4175 bp->stats_pending = 0;
4176
4177 return 0;
4178}
4179
bb2a0f7a 4180static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4181{
bb2a0f7a 4182 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4183 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4184 int i;
4185
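	/* fold the driver's 64-bit hi/lo counter pairs into the unsigned
	 * long fields of struct net_device_stats via bnx2x_hilo() */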
4186 nstats->rx_packets =
4187 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4188 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4189 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4190
4191 nstats->tx_packets =
4192 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4193 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4194 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4195
de832a55 4196 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4197
0e39e645 4198 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4199
de832a55 4200 nstats->rx_dropped = estats->mac_discard;
ca00392c 4201 for_each_rx_queue(bp, i)
4202 nstats->rx_dropped +=
4203 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4204
4205 nstats->tx_dropped = 0;
4206
4207 nstats->multicast =
de832a55 4208 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4209
bb2a0f7a 4210 nstats->collisions =
de832a55 4211 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4212
4213 nstats->rx_length_errors =
4214 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4215 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4216 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4217 bnx2x_hilo(&estats->brb_truncate_hi);
4218 nstats->rx_crc_errors =
4219 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4220 nstats->rx_frame_errors =
4221 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4222 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4223 nstats->rx_missed_errors = estats->xxoverflow_discard;
4224
4225 nstats->rx_errors = nstats->rx_length_errors +
4226 nstats->rx_over_errors +
4227 nstats->rx_crc_errors +
4228 nstats->rx_frame_errors +
4229 nstats->rx_fifo_errors +
4230 nstats->rx_missed_errors;
a2fbb9ea 4231
bb2a0f7a 4232 nstats->tx_aborted_errors =
4233 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4234 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4235 nstats->tx_carrier_errors =
4236 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4237 nstats->tx_fifo_errors = 0;
4238 nstats->tx_heartbeat_errors = 0;
4239 nstats->tx_window_errors = 0;
4240
4241 nstats->tx_errors = nstats->tx_aborted_errors +
4242 nstats->tx_carrier_errors +
4243 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4244}
4245
4246static void bnx2x_drv_stats_update(struct bnx2x *bp)
4247{
4248 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4249 int i;
4250
4251 estats->driver_xoff = 0;
4252 estats->rx_err_discard_pkt = 0;
4253 estats->rx_skb_alloc_failed = 0;
4254 estats->hw_csum_err = 0;
ca00392c 4255 for_each_rx_queue(bp, i) {
4256 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4257
4258 estats->driver_xoff += qstats->driver_xoff;
4259 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4260 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4261 estats->hw_csum_err += qstats->hw_csum_err;
4262 }
4263}
4264
bb2a0f7a 4265static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4266{
bb2a0f7a 4267 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4268
4269 if (*stats_comp != DMAE_COMP_VAL)
4270 return;
4271
4272 if (bp->port.pmf)
de832a55 4273 bnx2x_hw_stats_update(bp);
a2fbb9ea 4274
4275 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4276 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4277 bnx2x_panic();
4278 return;
4279 }
4280
4281 bnx2x_net_stats_update(bp);
4282 bnx2x_drv_stats_update(bp);
4283
a2fbb9ea 4284 if (bp->msglevel & NETIF_MSG_TIMER) {
4285 struct bnx2x_fastpath *fp0_rx = bp->fp;
4286 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4287 struct tstorm_per_client_stats *old_tclient =
4288 &bp->fp->old_tclient;
4289 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4290 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4291 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4292 int i;
4293
4294 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4295 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4296 " tx pkt (%lx)\n",
4297 bnx2x_tx_avail(fp0_tx),
4298 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4299 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4300 " rx pkt (%lx)\n",
4301 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4302 fp0_rx->rx_comp_cons),
4303 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4304 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4305 "brb truncate %u\n",
4306 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4307 qstats->driver_xoff,
4308 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4309 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4310 "packets_too_big_discard %lu no_buff_discard %lu "
4311 "mac_discard %u mac_filter_discard %u "
4312 "xxovrflow_discard %u brb_truncate_discard %u "
4313 "ttl0_discard %u\n",
4781bfad 4314 le32_to_cpu(old_tclient->checksum_discard),
4315 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4316 bnx2x_hilo(&qstats->no_buff_discard_hi),
4317 estats->mac_discard, estats->mac_filter_discard,
4318 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4319 le32_to_cpu(old_tclient->ttl0_discard));
4320
4321 for_each_queue(bp, i) {
4322 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4323 bnx2x_fp(bp, i, tx_pkt),
4324 bnx2x_fp(bp, i, rx_pkt),
4325 bnx2x_fp(bp, i, rx_calls));
4326 }
4327 }
4328
4329 bnx2x_hw_stats_post(bp);
4330 bnx2x_storm_stats_post(bp);
4331}
a2fbb9ea 4332
4333static void bnx2x_port_stats_stop(struct bnx2x *bp)
4334{
4335 struct dmae_command *dmae;
4336 u32 opcode;
4337 int loader_idx = PMF_DMAE_C(bp);
4338 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4339
bb2a0f7a 4340 bp->executer_idx = 0;
a2fbb9ea 4341
4342 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4343 DMAE_CMD_C_ENABLE |
4344 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4345#ifdef __BIG_ENDIAN
bb2a0f7a 4346 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4347#else
bb2a0f7a 4348 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4349#endif
4350 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4351 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4352
4353 if (bp->port.port_stx) {
4354
4355 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4356 if (bp->func_stx)
4357 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4358 else
4359 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4360 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4361 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4362 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4363 dmae->dst_addr_hi = 0;
4364 dmae->len = sizeof(struct host_port_stats) >> 2;
4365 if (bp->func_stx) {
4366 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4367 dmae->comp_addr_hi = 0;
4368 dmae->comp_val = 1;
4369 } else {
4370 dmae->comp_addr_lo =
4371 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4372 dmae->comp_addr_hi =
4373 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4374 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4375
4376 *stats_comp = 0;
4377 }
4378 }
4379
4380 if (bp->func_stx) {
4381
4382 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4383 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4384 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4385 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4386 dmae->dst_addr_lo = bp->func_stx >> 2;
4387 dmae->dst_addr_hi = 0;
4388 dmae->len = sizeof(struct host_func_stats) >> 2;
4389 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4390 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4391 dmae->comp_val = DMAE_COMP_VAL;
4392
4393 *stats_comp = 0;
a2fbb9ea 4394 }
4395}
4396
4397static void bnx2x_stats_stop(struct bnx2x *bp)
4398{
4399 int update = 0;
4400
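	/* wait for any in-flight DMAE, fold in one final hardware/storm
	 * snapshot, and (on the PMF) write the port stats back out to the
	 * MCP scratchpad before statistics processing stops */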
4401 bnx2x_stats_comp(bp);
4402
4403 if (bp->port.pmf)
4404 update = (bnx2x_hw_stats_update(bp) == 0);
4405
4406 update |= (bnx2x_storm_stats_update(bp) == 0);
4407
4408 if (update) {
4409 bnx2x_net_stats_update(bp);
a2fbb9ea 4410
4411 if (bp->port.pmf)
4412 bnx2x_port_stats_stop(bp);
4413
4414 bnx2x_hw_stats_post(bp);
4415 bnx2x_stats_comp(bp);
4416 }
4417}
4418
4419static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4420{
4421}
4422
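/* the statistics state machine: two states (DISABLED/ENABLED) crossed
 * with four events (PMF, LINK_UP, UPDATE, STOP); each entry names the
 * action to run and the state to enter next
 */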
4423static const struct {
4424 void (*action)(struct bnx2x *bp);
4425 enum bnx2x_stats_state next_state;
4426} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4427/* state event */
4428{
4429/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4430/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4431/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4432/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4433},
4434{
4435/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4436/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4437/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4438/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4439}
4440};
4441
4442static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4443{
4444 enum bnx2x_stats_state state = bp->stats_state;
4445
4446 bnx2x_stats_stm[state][event].action(bp);
4447 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4448
4449 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4450 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4451 state, event, bp->stats_state);
4452}
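/* e.g. the periodic timer drives the machine with STATS_EVENT_UPDATE
 * (see bnx2x_timer() below), bnx2x_stats_init() above feeds in
 * STATS_EVENT_PMF, and link changes elsewhere in this file deliver
 * STATS_EVENT_LINK_UP
 */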
4453
4454static void bnx2x_timer(unsigned long data)
4455{
4456 struct bnx2x *bp = (struct bnx2x *) data;
4457
4458 if (!netif_running(bp->dev))
4459 return;
4460
4461 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4462 goto timer_restart;
4463
4464 if (poll) {
4465 struct bnx2x_fastpath *fp = &bp->fp[0];
4466 int rc;
4467
7961f791 4468 bnx2x_tx_int(fp);
4469 rc = bnx2x_rx_int(fp, 1000);
4470 }
4471
4472 if (!BP_NOMCP(bp)) {
4473 int func = BP_FUNC(bp);
4474 u32 drv_pulse;
4475 u32 mcp_pulse;
4476
4477 ++bp->fw_drv_pulse_wr_seq;
4478 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4479 /* TBD - add SYSTEM_TIME */
4480 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4481 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4482
34f80b04 4483 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4484 MCP_PULSE_SEQ_MASK);
4485 /* The delta between driver pulse and mcp response
4486 * should be 1 (before mcp response) or 0 (after mcp response)
4487 */
4488 if ((drv_pulse != mcp_pulse) &&
4489 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4490 /* someone lost a heartbeat... */
4491 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4492 drv_pulse, mcp_pulse);
4493 }
4494 }
4495
4496 if ((bp->state == BNX2X_STATE_OPEN) ||
4497 (bp->state == BNX2X_STATE_DISABLED))
4498 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4499
f1410647 4500timer_restart:
4501 mod_timer(&bp->timer, jiffies + bp->current_interval);
4502}
4503
4504/* end of Statistics */
4505
4506/* nic init */
4507
4508/*
4509 * nic init service functions
4510 */
4511
34f80b04 4512static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4513{
4514 int port = BP_PORT(bp);
4515
4516 /* "CSTORM" */
4517 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4518 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4519 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4520 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4521 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4522 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4523}
4524
4525static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4526 dma_addr_t mapping, int sb_id)
4527{
4528 int port = BP_PORT(bp);
bb2a0f7a 4529 int func = BP_FUNC(bp);
a2fbb9ea 4530 int index;
34f80b04 4531 u64 section;
4532
4533 /* USTORM */
4534 section = ((u64)mapping) + offsetof(struct host_status_block,
4535 u_status_block);
34f80b04 4536 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 4537
4538 REG_WR(bp, BAR_CSTRORM_INTMEM +
4539 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4540 REG_WR(bp, BAR_CSTRORM_INTMEM +
4541 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4542 U64_HI(section));
4543 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4544 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4545
4546 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4547 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4548 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4549
4550 /* CSTORM */
4551 section = ((u64)mapping) + offsetof(struct host_status_block,
4552 c_status_block);
34f80b04 4553 sb->c_status_block.status_block_id = sb_id;
4554
4555 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4556 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4557 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4558 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4559 U64_HI(section));
7a9b2557 4560 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 4561 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4562
4563 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4564 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4565 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4566
4567 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4568}
4569
4570static void bnx2x_zero_def_sb(struct bnx2x *bp)
4571{
4572 int func = BP_FUNC(bp);
a2fbb9ea 4573
ca00392c 4574 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4575 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4576 sizeof(struct tstorm_def_status_block)/4);
4577 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4578 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4579 sizeof(struct cstorm_def_status_block_u)/4);
4580 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4581 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4582 sizeof(struct cstorm_def_status_block_c)/4);
4583 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4584 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4585 sizeof(struct xstorm_def_status_block)/4);
4586}
4587
4588static void bnx2x_init_def_sb(struct bnx2x *bp,
4589 struct host_def_status_block *def_sb,
34f80b04 4590 dma_addr_t mapping, int sb_id)
a2fbb9ea 4591{
4592 int port = BP_PORT(bp);
4593 int func = BP_FUNC(bp);
4594 int index, val, reg_offset;
4595 u64 section;
4596
4597 /* ATTN */
4598 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4599 atten_status_block);
34f80b04 4600 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4601
4602 bp->attn_state = 0;
4603
4604 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4605 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4606
34f80b04 4607 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4608 bp->attn_group[index].sig[0] = REG_RD(bp,
4609 reg_offset + 0x10*index);
4610 bp->attn_group[index].sig[1] = REG_RD(bp,
4611 reg_offset + 0x4 + 0x10*index);
4612 bp->attn_group[index].sig[2] = REG_RD(bp,
4613 reg_offset + 0x8 + 0x10*index);
4614 bp->attn_group[index].sig[3] = REG_RD(bp,
4615 reg_offset + 0xc + 0x10*index);
4616 }
4617
4618 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4619 HC_REG_ATTN_MSG0_ADDR_L);
4620
4621 REG_WR(bp, reg_offset, U64_LO(section));
4622 REG_WR(bp, reg_offset + 4, U64_HI(section));
4623
4624 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4625
4626 val = REG_RD(bp, reg_offset);
34f80b04 4627 val |= sb_id;
a2fbb9ea
ET
4628 REG_WR(bp, reg_offset, val);
4629
4630 /* USTORM */
4631 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4632 u_def_status_block);
34f80b04 4633 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4634
4635 REG_WR(bp, BAR_CSTRORM_INTMEM +
4636 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4637 REG_WR(bp, BAR_CSTRORM_INTMEM +
4638 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 4639 U64_HI(section));
4640 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4641 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4642
4643 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4644 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4645 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4646
4647 /* CSTORM */
4648 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4649 c_def_status_block);
34f80b04 4650 def_sb->c_def_status_block.status_block_id = sb_id;
4651
4652 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4653 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 4654 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4655 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 4656 U64_HI(section));
5c862848 4657 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 4658 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4659
4660 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4661 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4662 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4663
4664 /* TSTORM */
4665 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4666 t_def_status_block);
34f80b04 4667 def_sb->t_def_status_block.status_block_id = sb_id;
4668
4669 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4670 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4671 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4672 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4673 U64_HI(section));
5c862848 4674 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4675 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4676
4677 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4678 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4679 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4680
4681 /* XSTORM */
4682 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4683 x_def_status_block);
34f80b04 4684 def_sb->x_def_status_block.status_block_id = sb_id;
4685
4686 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4687 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4688 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4689 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4690 U64_HI(section));
5c862848 4691 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4692 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4693
4694 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4695 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4696 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4697
bb2a0f7a 4698 bp->stats_pending = 0;
66e855f3 4699 bp->set_mac_pending = 0;
bb2a0f7a 4700
34f80b04 4701 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4702}
4703
4704static void bnx2x_update_coalesce(struct bnx2x *bp)
4705{
34f80b04 4706 int port = BP_PORT(bp);
4707 int i;
4708
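	/* rx_ticks/tx_ticks are divided by 12 to match the units of the HC
	 * timeout fields; a resulting timeout of 0 also sets the matching
	 * HC_DISABLE flag, turning coalescing off for that index */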
4709 for_each_queue(bp, i) {
34f80b04 4710 int sb_id = bp->fp[i].sb_id;
4711
4712 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4713 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4714 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4715 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4716 bp->rx_ticks/12);
4717 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4718 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4719 U_SB_ETH_RX_CQ_INDEX),
3799cf47 4720 (bp->rx_ticks/12) ? 0 : 1);
4721
4722 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4723 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4724 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4725 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4726 bp->tx_ticks/12);
a2fbb9ea 4727 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4728 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4729 C_SB_ETH_TX_CQ_INDEX),
3799cf47 4730 (bp->tx_ticks/12) ? 0 : 1);
4731 }
4732}
4733
4734static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4735 struct bnx2x_fastpath *fp, int last)
4736{
4737 int i;
4738
4739 for (i = 0; i < last; i++) {
4740 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4741 struct sk_buff *skb = rx_buf->skb;
4742
4743 if (skb == NULL) {
4744 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4745 continue;
4746 }
4747
4748 if (fp->tpa_state[i] == BNX2X_TPA_START)
4749 pci_unmap_single(bp->pdev,
4750 pci_unmap_addr(rx_buf, mapping),
356e2385 4751 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4752
4753 dev_kfree_skb(skb);
4754 rx_buf->skb = NULL;
4755 }
4756}
4757
4758static void bnx2x_init_rx_rings(struct bnx2x *bp)
4759{
7a9b2557 4760 int func = BP_FUNC(bp);
4761 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4762 ETH_MAX_AGGREGATION_QUEUES_E1H;
4763 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4764 int i, j;
a2fbb9ea 4765
87942b46 4766 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4767 DP(NETIF_MSG_IFUP,
4768 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4769
7a9b2557 4770 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4771
555f6c78 4772 for_each_rx_queue(bp, j) {
32626230 4773 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4774
32626230 4775 for (i = 0; i < max_agg_queues; i++) {
4776 fp->tpa_pool[i].skb =
4777 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4778 if (!fp->tpa_pool[i].skb) {
4779 BNX2X_ERR("Failed to allocate TPA "
4780 "skb pool for queue[%d] - "
4781 "disabling TPA on this "
4782 "queue!\n", j);
4783 bnx2x_free_tpa_pool(bp, fp, i);
4784 fp->disable_tpa = 1;
4785 break;
4786 }
4787 pci_unmap_addr_set((struct sw_rx_bd *)
4788 &bp->fp->tpa_pool[i],
4789 mapping, 0);
4790 fp->tpa_state[i] = BNX2X_TPA_STOP;
4791 }
4792 }
4793 }
4794
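	/* per-queue ring setup: each Rx queue owns an SGE ring (for TPA
	 * aggregation buffers), an Rx BD ring and an RCQ completion ring;
	 * the tail of every ring page holds a "next page" pointer so the
	 * hardware can follow the chain */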
555f6c78 4795 for_each_rx_queue(bp, j) {
4796 struct bnx2x_fastpath *fp = &bp->fp[j];
4797
4798 fp->rx_bd_cons = 0;
4799 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4800 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4801
4802 /* Mark queue as Rx */
4803 fp->is_rx_queue = 1;
4804
4805 /* "next page" elements initialization */
4806 /* SGE ring */
4807 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4808 struct eth_rx_sge *sge;
4809
4810 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4811 sge->addr_hi =
4812 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4813 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4814 sge->addr_lo =
4815 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4816 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4817 }
4818
4819 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4820
7a9b2557 4821 /* RX BD ring */
4822 for (i = 1; i <= NUM_RX_RINGS; i++) {
4823 struct eth_rx_bd *rx_bd;
4824
4825 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4826 rx_bd->addr_hi =
4827 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4828 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4829 rx_bd->addr_lo =
4830 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4831 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4832 }
4833
34f80b04 4834 /* CQ ring */
4835 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4836 struct eth_rx_cqe_next_page *nextpg;
4837
4838 nextpg = (struct eth_rx_cqe_next_page *)
4839 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4840 nextpg->addr_hi =
4841 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4842 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4843 nextpg->addr_lo =
4844 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4845 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4846 }
4847
4848 /* Allocate SGEs and initialize the ring elements */
4849 for (i = 0, ring_prod = 0;
4850 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4851
4852 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4853 BNX2X_ERR("was only able to allocate "
4854 "%d rx sges\n", i);
4855 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4856 /* Cleanup already allocated elements */
4857 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4858 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4859 fp->disable_tpa = 1;
4860 ring_prod = 0;
4861 break;
4862 }
4863 ring_prod = NEXT_SGE_IDX(ring_prod);
4864 }
4865 fp->rx_sge_prod = ring_prod;
4866
4867 /* Allocate BDs and initialize BD ring */
66e855f3 4868 fp->rx_comp_cons = 0;
7a9b2557 4869 cqe_ring_prod = ring_prod = 0;
4870 for (i = 0; i < bp->rx_ring_size; i++) {
4871 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4872 BNX2X_ERR("was only able to allocate "
4873 "%d rx skbs on queue[%d]\n", i, j);
4874 fp->eth_q_stats.rx_skb_alloc_failed++;
4875 break;
4876 }
4877 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4878 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4879 WARN_ON(ring_prod <= i);
4880 }
4881
4882 fp->rx_bd_prod = ring_prod;
4883 /* must not have more available CQEs than BDs */
4884 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4885 cqe_ring_prod);
4886 fp->rx_pkt = fp->rx_calls = 0;
4887
4888 /* Warning!
4889 * this write generates an interrupt (to the TSTORM);
4890 * it must only be done after the chip is initialized
4891 */
4892 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4893 fp->rx_sge_prod);
4894 if (j != 0)
4895 continue;
4896
4897 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4898 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4899 U64_LO(fp->rx_comp_mapping));
4900 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4901 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4902 U64_HI(fp->rx_comp_mapping));
4903 }
4904}
4905
4906static void bnx2x_init_tx_ring(struct bnx2x *bp)
4907{
4908 int i, j;
4909
555f6c78 4910 for_each_tx_queue(bp, j) {
4911 struct bnx2x_fastpath *fp = &bp->fp[j];
4912
4913 for (i = 1; i <= NUM_TX_RINGS; i++) {
4914 struct eth_tx_next_bd *tx_next_bd =
4915 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 4916
ca00392c 4917 tx_next_bd->addr_hi =
a2fbb9ea 4918 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4919 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 4920 tx_next_bd->addr_lo =
a2fbb9ea 4921 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4922 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4923 }
4924
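		/* prepare the per-queue Tx doorbell: a single doorbell write
		 * carries this header together with the current BD producer */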
4925 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
4926 fp->tx_db.data.zero_fill1 = 0;
4927 fp->tx_db.data.prod = 0;
4928
4929 fp->tx_pkt_prod = 0;
4930 fp->tx_pkt_cons = 0;
4931 fp->tx_bd_prod = 0;
4932 fp->tx_bd_cons = 0;
4933 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4934 fp->tx_pkt = 0;
4935 }
4936}
4937
4938static void bnx2x_init_sp_ring(struct bnx2x *bp)
4939{
34f80b04 4940 int func = BP_FUNC(bp);
4941
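	/* the slowpath queue (SPQ) carries ramrod requests to the firmware;
	 * advertise its page base and initial producer to the XSTORM */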
4942 spin_lock_init(&bp->spq_lock);
4943
4944 bp->spq_left = MAX_SPQ_PENDING;
4945 bp->spq_prod_idx = 0;
4946 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4947 bp->spq_prod_bd = bp->spq;
4948 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4949
34f80b04 4950 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4951 U64_LO(bp->spq_mapping));
4952 REG_WR(bp,
4953 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4954 U64_HI(bp->spq_mapping));
4955
34f80b04 4956 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4957 bp->spq_prod_idx);
4958}
4959
4960static void bnx2x_init_context(struct bnx2x *bp)
4961{
4962 int i;
4963
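	/* each connection gets an ETH context: the USTORM section describes
	 * the Rx BD/SGE rings (and optionally TPA), while the CSTORM/XSTORM
	 * sections below cover the Tx ring and its status block index */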
ca00392c 4964 for_each_rx_queue(bp, i) {
4965 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4966 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 4967 u8 cl_id = fp->cl_id;
a2fbb9ea 4968
4969 context->ustorm_st_context.common.sb_index_numbers =
4970 BNX2X_RX_SB_INDEX_NUM;
0626b899 4971 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 4972 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 4973 context->ustorm_st_context.common.flags =
de832a55
EG
4974 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4975 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4976 context->ustorm_st_context.common.statistics_counter_id =
4977 cl_id;
8d9c5f34 4978 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 4979 BNX2X_RX_ALIGN_SHIFT;
34f80b04 4980 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4981 bp->rx_buf_size;
34f80b04 4982 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4983 U64_HI(fp->rx_desc_mapping);
34f80b04 4984 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4985 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4986 if (!fp->disable_tpa) {
4987 context->ustorm_st_context.common.flags |=
ca00392c 4988 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 4989 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
4990 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4991 (u32)0xffff);
7a9b2557
VZ
4992 context->ustorm_st_context.common.sge_page_base_hi =
4993 U64_HI(fp->rx_sge_mapping);
4994 context->ustorm_st_context.common.sge_page_base_lo =
4995 U64_LO(fp->rx_sge_mapping);
ca00392c
EG
4996
4997 context->ustorm_st_context.common.max_sges_for_packet =
4998 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
4999 context->ustorm_st_context.common.max_sges_for_packet =
5000 ((context->ustorm_st_context.common.
5001 max_sges_for_packet + PAGES_PER_SGE - 1) &
5002 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
7a9b2557
VZ
5003 }
5004
8d9c5f34
EG
5005 context->ustorm_ag_context.cdu_usage =
5006 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5007 CDU_REGION_NUMBER_UCM_AG,
5008 ETH_CONNECTION_TYPE);
5009
ca00392c
EG
5010 context->xstorm_ag_context.cdu_reserved =
5011 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5012 CDU_REGION_NUMBER_XCM_AG,
5013 ETH_CONNECTION_TYPE);
5014 }
5015
5016 for_each_tx_queue(bp, i) {
5017 struct bnx2x_fastpath *fp = &bp->fp[i];
5018 struct eth_context *context =
5019 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5020
5021 context->cstorm_st_context.sb_index_number =
5022 C_SB_ETH_TX_CQ_INDEX;
5023 context->cstorm_st_context.status_block_id = fp->sb_id;
5024
8d9c5f34
EG
5025 context->xstorm_st_context.tx_bd_page_base_hi =
5026 U64_HI(fp->tx_desc_mapping);
5027 context->xstorm_st_context.tx_bd_page_base_lo =
5028 U64_LO(fp->tx_desc_mapping);
ca00392c 5029 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 5030 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea
ET
5031 }
5032}
5033
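/* Program the RSS indirection table: each TSTORM entry maps a hash result
 * to one of the Rx clients, round-robin over the Rx queues.
 */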
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

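/* Translate bp->rx_mode into the per-function TSTORM accept/drop masks and
 * the NIG LLH mask, then (unless Rx is disabled) update the per-client
 * configuration as well.
 */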
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

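/* Per-function internal memory init: RSS/TPA configuration, per-client
 * statistics reset, statistics query addresses, CQE page mapping, dropless
 * flow control thresholds (E1H only) and rate shaping/fairness contexts.
 */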
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link;
		   until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables =
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (bp->vn_weight_sum)
			bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		else
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

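/* Top-level ring and context init: bind each fastpath to its client id and
 * status block, then initialize all rings and internal memory before
 * enabling interrupts.
 */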
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		/* Suitable Rx and Tx SBs are served by the same client */
		if (i >= bp->num_rx_queues)
			fp->cl_id -= bp->num_rx_queues;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

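/* Inflate a gzip image into bp->gunzip_buf: validate the fixed 10-byte
 * gzip header, skip the optional file name (FNAME flag) and inflate the
 * raw deflate stream (negative windowBits tells zlib there is no header).
 */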
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* Some of the internal memories are not directly readable from the driver;
 * to test them we send debug packets.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do I reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

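/* Decide from the shared HW configuration whether fan failure detection is
 * needed (explicitly enabled or implied by the PHY type) and, if so, route
 * the fan failure signal (SPIO 5, active low) to the IGU as an attention.
 */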
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
					SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

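/* One-time chip-wide HW init, done only by the function that received the
 * COMMON load code: global reset, PXP/PXP2, all storm blocks, CFC, CDU,
 * NIG, an optional internal memory self test and common PHY init.
 */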
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

	return 0;
}

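/* Per-port HW init: the port stage of each block, BRB pause thresholds
 * derived from the MTU and single/multi port mode, PBF credits, AEU
 * attention masks and PHY-specific attention routing.
 */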
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* support pause requests from USDM, TSDM and BRB */
		REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* The phys address is shifted right 12 bits and has a 1=valid bit added
 * at the 53rd bit; since this is a wide register(TM) we split it into
 * two 32 bit writes.
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

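/* Per-function HW init: MSI reconfigure capability, the function's ILT
 * window for the CDU context, E1H multi-function NIG/HC setup and PCIE
 * error cleanup.
 */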
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_blocks[i], FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

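/* Dispatch HW init according to the MCP load response; COMMON falls
 * through to PORT which falls through to FUNCTION, so the first driver
 * instance on the chip/port also does the shared initialization.
 */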
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	} else
		bp->func_stx = 0;

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

6642static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6643{
6644 int i, j;
6645
555f6c78 6646 for_each_rx_queue(bp, j) {
6647 struct bnx2x_fastpath *fp = &bp->fp[j];
6648
6649 for (i = 0; i < NUM_RX_BD; i++) {
6650 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6651 struct sk_buff *skb = rx_buf->skb;
6652
6653 if (skb == NULL)
6654 continue;
6655
6656 pci_unmap_single(bp->pdev,
6657 pci_unmap_addr(rx_buf, mapping),
356e2385 6658 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6659
6660 rx_buf->skb = NULL;
6661 dev_kfree_skb(skb);
6662 }
7a9b2557 6663 if (!fp->disable_tpa)
6664 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6665 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6666 ETH_MAX_AGGREGATION_QUEUES_E1H);
6667 }
6668}
6669
6670static void bnx2x_free_skbs(struct bnx2x *bp)
6671{
6672 bnx2x_free_tx_skbs(bp);
6673 bnx2x_free_rx_skbs(bp);
6674}
6675
6676static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6677{
34f80b04 6678 int i, offset = 1;
6679
6680 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6681 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6682 bp->msix_table[0].vector);
6683
6684 for_each_queue(bp, i) {
c14423fe 6685 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6686 "state %x\n", i, bp->msix_table[i + offset].vector,
6687 bnx2x_fp(bp, i, state));
6688
34f80b04 6689 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6690 }
6691}
6692
6693static void bnx2x_free_irq(struct bnx2x *bp)
6694{
a2fbb9ea 6695 if (bp->flags & USING_MSIX_FLAG) {
6696 bnx2x_free_msix_irqs(bp);
6697 pci_disable_msix(bp->pdev);
6698 bp->flags &= ~USING_MSIX_FLAG;
6699
6700 } else if (bp->flags & USING_MSI_FLAG) {
6701 free_irq(bp->pdev->irq, bp->dev);
6702 pci_disable_msi(bp->pdev);
6703 bp->flags &= ~USING_MSI_FLAG;
6704
6705 } else
6706 free_irq(bp->pdev->irq, bp->dev);
6707}
6708
6709static int bnx2x_enable_msix(struct bnx2x *bp)
6710{
6711 int i, rc, offset = 1;
6712 int igu_vec = 0;
a2fbb9ea 6713
6714 bp->msix_table[0].entry = igu_vec;
6715 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6716
34f80b04 6717 for_each_queue(bp, i) {
8badd27a 6718 igu_vec = BP_L_ID(bp) + offset + i;
6719 bp->msix_table[i + offset].entry = igu_vec;
6720 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6721 "(fastpath #%u)\n", i + offset, igu_vec, i);
6722 }
6723
34f80b04 6724 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6725 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6726 if (rc) {
6727 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6728 return rc;
34f80b04 6729 }
8badd27a 6730
6731 bp->flags |= USING_MSIX_FLAG;
6732
6733 return 0;
6734}
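/* MSI-X vector layout: entry 0 carries the slowpath/default status
 * block, entries 1..BNX2X_NUM_QUEUES(bp) carry one fastpath queue
 * each, with the IGU vector derived from the function's base logical
 * ID (BP_L_ID) plus the queue index. */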
6735
6736static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6737{
34f80b04 6738 int i, rc, offset = 1;
a2fbb9ea 6739
6740 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6741 bp->dev->name, bp->dev);
6742 if (rc) {
6743 BNX2X_ERR("request sp irq failed\n");
6744 return -EBUSY;
6745 }
6746
6747 for_each_queue(bp, i) {
6748 struct bnx2x_fastpath *fp = &bp->fp[i];
6749
6750 if (i < bp->num_rx_queues)
6751 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6752 else
6753 sprintf(fp->name, "%s-tx-%d",
6754 bp->dev->name, i - bp->num_rx_queues);
6755
34f80b04 6756 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6757 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6758 if (rc) {
555f6c78 6759 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6760 bnx2x_free_msix_irqs(bp);
6761 return -EBUSY;
6762 }
6763
555f6c78 6764 fp->state = BNX2X_FP_STATE_IRQ;
6765 }
6766
555f6c78 6767 i = BNX2X_NUM_QUEUES(bp);
6768 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
6769 " ... fp[%d] %d\n",
6770 bp->dev->name, bp->msix_table[0].vector,
6771 0, bp->msix_table[offset].vector,
6772 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 6773
a2fbb9ea 6774 return 0;
6775}
6776
6777static int bnx2x_enable_msi(struct bnx2x *bp)
6778{
6779 int rc;
6780
6781 rc = pci_enable_msi(bp->pdev);
6782 if (rc) {
6783 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6784 return -1;
6785 }
6786 bp->flags |= USING_MSI_FLAG;
6787
6788 return 0;
6789}
6790
6791static int bnx2x_req_irq(struct bnx2x *bp)
6792{
8badd27a 6793 unsigned long flags;
34f80b04 6794 int rc;
a2fbb9ea 6795
6796 if (bp->flags & USING_MSI_FLAG)
6797 flags = 0;
6798 else
6799 flags = IRQF_SHARED;
6800
6801 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6802 bp->dev->name, bp->dev);
6803 if (!rc)
6804 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6805
6806 return rc;
6807}
6808
6809static void bnx2x_napi_enable(struct bnx2x *bp)
6810{
6811 int i;
6812
555f6c78 6813 for_each_rx_queue(bp, i)
6814 napi_enable(&bnx2x_fp(bp, i, napi));
6815}
6816
6817static void bnx2x_napi_disable(struct bnx2x *bp)
6818{
6819 int i;
6820
555f6c78 6821 for_each_rx_queue(bp, i)
6822 napi_disable(&bnx2x_fp(bp, i, napi));
6823}
6824
6825static void bnx2x_netif_start(struct bnx2x *bp)
6826{
6827 int intr_sem;
6828
6829 intr_sem = atomic_dec_and_test(&bp->intr_sem);
6830 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6831
6832 if (intr_sem) {
65abd74d 6833 if (netif_running(bp->dev)) {
6834 bnx2x_napi_enable(bp);
6835 bnx2x_int_enable(bp);
6836 if (bp->state == BNX2X_STATE_OPEN)
6837 netif_tx_wake_all_queues(bp->dev);
6838 }
6839 }
6840}
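/* bp->intr_sem is effectively a disable-nesting counter: it is bumped
 * when interrupts are masked and only the bnx2x_netif_start() call
 * that brings it back to zero actually re-enables NAPI, interrupts
 * and the Tx queues. */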
6841
f8ef6e44 6842static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6843{
f8ef6e44 6844 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6845 bnx2x_napi_disable(bp);
6846 netif_tx_disable(bp->dev);
6847 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6848}
6849
6850/*
6851 * Init service functions
6852 */
6853
3101c2bc 6854static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6855{
6856 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6857 int port = BP_PORT(bp);
6858
6859 /* CAM allocation
6860 * unicasts 0-31:port0 32-63:port1
6861 * multicast 64-127:port0 128-191:port1
6862 */
8d9c5f34 6863 config->hdr.length = 2;
af246401 6864 config->hdr.offset = port ? 32 : 0;
0626b899 6865 config->hdr.client_id = bp->fp->cl_id;
6866 config->hdr.reserved1 = 0;
6867
6868 /* primary MAC */
6869 config->config_table[0].cam_entry.msb_mac_addr =
6870 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6871 config->config_table[0].cam_entry.middle_mac_addr =
6872 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6873 config->config_table[0].cam_entry.lsb_mac_addr =
6874 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6875 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6876 if (set)
6877 config->config_table[0].target_table_entry.flags = 0;
6878 else
6879 CAM_INVALIDATE(config->config_table[0]);
6880 config->config_table[0].target_table_entry.clients_bit_vector =
6881 cpu_to_le32(1 << BP_L_ID(bp));
6882 config->config_table[0].target_table_entry.vlan_id = 0;
6883
6884 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6885 (set ? "setting" : "clearing"),
6886 config->config_table[0].cam_entry.msb_mac_addr,
6887 config->config_table[0].cam_entry.middle_mac_addr,
6888 config->config_table[0].cam_entry.lsb_mac_addr);
6889
6890 /* broadcast */
6891 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6892 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6893 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
34f80b04 6894 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6895 if (set)
6896 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6897 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6898 else
6899 CAM_INVALIDATE(config->config_table[1]);
6900 config->config_table[1].target_table_entry.clients_bit_vector =
6901 cpu_to_le32(1 << BP_L_ID(bp));
6902 config->config_table[1].target_table_entry.vlan_id = 0;
6903
6904 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6905 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6906 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6907}
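/* The CAM holds the MAC as three big-endian 16-bit words; on a
 * little-endian host the swab16() calls above do the byte flip, so
 * e.g. aa:bb:cc:dd:ee:ff lands as msb 0xaabb, middle 0xccdd,
 * lsb 0xeeff. */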
6908
3101c2bc 6909static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6910{
6911 struct mac_configuration_cmd_e1h *config =
6912 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6913
6914 /* CAM allocation for E1H
6915 * unicasts: by func number
6916 * multicast: 20+FUNC*20, 20 each
6917 */
8d9c5f34 6918 config->hdr.length = 1;
34f80b04 6919 config->hdr.offset = BP_FUNC(bp);
0626b899 6920 config->hdr.client_id = bp->fp->cl_id;
6921 config->hdr.reserved1 = 0;
6922
6923 /* primary MAC */
6924 config->config_table[0].msb_mac_addr =
6925 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6926 config->config_table[0].middle_mac_addr =
6927 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6928 config->config_table[0].lsb_mac_addr =
6929 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6930 config->config_table[0].clients_bit_vector =
6931 cpu_to_le32(1 << BP_L_ID(bp));
6932 config->config_table[0].vlan_id = 0;
6933 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6934 if (set)
6935 config->config_table[0].flags = BP_PORT(bp);
6936 else
6937 config->config_table[0].flags =
6938 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6939
6940 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6941 (set ? "setting" : "clearing"),
6942 config->config_table[0].msb_mac_addr,
6943 config->config_table[0].middle_mac_addr,
6944 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6945
6946 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6947 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6948 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6949}
6950
6951static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6952 int *state_p, int poll)
6953{
6954 /* can take a while if any port is running */
8b3a0f0b 6955 int cnt = 5000;
a2fbb9ea 6956
6957 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6958 poll ? "polling" : "waiting", state, idx);
6959
6960 might_sleep();
34f80b04 6961 while (cnt--) {
6962 if (poll) {
6963 bnx2x_rx_int(bp->fp, 10);
6964 /* if index is different from 0
6965 * the reply for some commands will
3101c2bc 6966 * be on the non default queue
6967 */
6968 if (idx)
6969 bnx2x_rx_int(&bp->fp[idx], 10);
6970 }
a2fbb9ea 6971
3101c2bc 6972 mb(); /* state is changed by bnx2x_sp_event() */
6973 if (*state_p == state) {
6974#ifdef BNX2X_STOP_ON_ERROR
6975 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6976#endif
a2fbb9ea 6977 return 0;
8b3a0f0b 6978 }
a2fbb9ea 6979
a2fbb9ea 6980 msleep(1);
6981 }
6982
a2fbb9ea 6983 /* timeout! */
6984 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6985 poll ? "polling" : "waiting", state, idx);
6986#ifdef BNX2X_STOP_ON_ERROR
6987 bnx2x_panic();
6988#endif
a2fbb9ea 6989
49d66772 6990 return -EBUSY;
6991}
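/* Ramrod completions are delivered through the status block and
 * bnx2x_sp_event() flips *state_p from interrupt context, hence the
 * mb() before each test.  The loop allows roughly 5 seconds (5000 x
 * 1 ms); in poll mode interrupts are not serviced, so the waiter
 * drives bnx2x_rx_int() by hand to reap the completion. */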
6992
6993static int bnx2x_setup_leading(struct bnx2x *bp)
6994{
34f80b04 6995 int rc;
a2fbb9ea 6996
c14423fe 6997 /* reset IGU state */
34f80b04 6998 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6999
7000 /* SETUP ramrod */
7001 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7002
7003 /* Wait for completion */
7004 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7005
34f80b04 7006 return rc;
7007}
7008
7009static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7010{
7011 struct bnx2x_fastpath *fp = &bp->fp[index];
7012
a2fbb9ea 7013 /* reset IGU state */
555f6c78 7014 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7015
228241eb 7016 /* SETUP ramrod */
7017 fp->state = BNX2X_FP_STATE_OPENING;
7018 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7019 fp->cl_id, 0);
7020
7021 /* Wait for completion */
7022 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7023 &(fp->state), 0);
7024}
7025
a2fbb9ea 7026static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7027
7028static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7029 int *num_tx_queues_out)
7030{
7031 int _num_rx_queues = 0, _num_tx_queues = 0;
7032
7033 switch (bp->multi_mode) {
7034 case ETH_RSS_MODE_DISABLED:
7035 _num_rx_queues = 1;
7036 _num_tx_queues = 1;
7037 break;
7038
7039 case ETH_RSS_MODE_REGULAR:
7040 if (num_rx_queues)
7041 _num_rx_queues = min_t(u32, num_rx_queues,
7042 BNX2X_MAX_QUEUES(bp));
7043 else
7044 _num_rx_queues = min_t(u32, num_online_cpus(),
7045 BNX2X_MAX_QUEUES(bp));
7046
7047 if (num_tx_queues)
7048 _num_tx_queues = min_t(u32, num_tx_queues,
7049 BNX2X_MAX_QUEUES(bp));
7050 else
7051 _num_tx_queues = min_t(u32, num_online_cpus(),
7052 BNX2X_MAX_QUEUES(bp));
7053
 7054 /* There must not be more Tx queues than Rx queues */
7055 if (_num_tx_queues > _num_rx_queues) {
7056 BNX2X_ERR("number of tx queues (%d) > "
7057 "number of rx queues (%d)"
7058 " defaulting to %d\n",
7059 _num_tx_queues, _num_rx_queues,
7060 _num_rx_queues);
7061 _num_tx_queues = _num_rx_queues;
7062 }
7063 break;
7064
7065
7066 default:
7067 _num_rx_queues = 1;
7068 _num_tx_queues = 1;
7069 break;
7070 }
7071
7072 *num_rx_queues_out = _num_rx_queues;
7073 *num_tx_queues_out = _num_tx_queues;
7074}
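/* Queue-count policy: honour the num_rx_queues/num_tx_queues module
 * parameters when given, otherwise default to the number of online
 * CPUs; both are clamped to BNX2X_MAX_QUEUES(bp), and Tx is never
 * allowed to exceed Rx. */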
7075
7076static int bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 7077{
ca00392c 7078 int rc = 0;
a2fbb9ea 7079
7080 switch (int_mode) {
7081 case INT_MODE_INTx:
7082 case INT_MODE_MSI:
7083 bp->num_rx_queues = 1;
7084 bp->num_tx_queues = 1;
7085 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7086 break;
7087
7088 case INT_MODE_MSIX:
7089 default:
7090 /* Set interrupt mode according to bp->multi_mode value */
7091 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7092 &bp->num_tx_queues);
7093
7094 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
555f6c78 7095 bp->num_rx_queues, bp->num_tx_queues);
ca00392c 7096
7097 /* if we can't use MSI-X we only need one fp,
7098 * so try to enable MSI-X with the requested number of fp's
7099 * and fallback to MSI or legacy INTx with one fp
7100 */
7101 rc = bnx2x_enable_msix(bp);
7102 if (rc) {
34f80b04 7103 /* failed to enable MSI-X */
7104 if (bp->multi_mode)
7105 BNX2X_ERR("Multi requested but failed to "
7106 "enable MSI-X (rx %d tx %d), "
7107 "set number of queues to 1\n",
7108 bp->num_rx_queues, bp->num_tx_queues);
7109 bp->num_rx_queues = 1;
7110 bp->num_tx_queues = 1;
a2fbb9ea 7111 }
8badd27a 7112 break;
a2fbb9ea 7113 }
555f6c78 7114 bp->dev->real_num_tx_queues = bp->num_tx_queues;
ca00392c 7115 return rc;
7116}
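/* Note that bnx2x_set_int_mode() only picks the queue counts and
 * tries pci_enable_msix(); enabling MSI and actually requesting the
 * IRQs happens later in bnx2x_nic_load() once memory is allocated. */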
7117
7118
7119/* must be called with rtnl_lock */
7120static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7121{
7122 u32 load_code;
7123 int i, rc;
7124
8badd27a 7125#ifdef BNX2X_STOP_ON_ERROR
7126 if (unlikely(bp->panic))
7127 return -EPERM;
7128#endif
7129
7130 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7131
ca00392c 7132 rc = bnx2x_set_int_mode(bp);
c14423fe 7133
7134 if (bnx2x_alloc_mem(bp))
7135 return -ENOMEM;
7136
555f6c78 7137 for_each_rx_queue(bp, i)
7138 bnx2x_fp(bp, i, disable_tpa) =
7139 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7140
555f6c78 7141 for_each_rx_queue(bp, i)
7142 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7143 bnx2x_poll, 128);
7144
7145 bnx2x_napi_enable(bp);
7146
7147 if (bp->flags & USING_MSIX_FLAG) {
7148 rc = bnx2x_req_msix_irqs(bp);
7149 if (rc) {
7150 pci_disable_msix(bp->pdev);
2dfe0e1f 7151 goto load_error1;
7152 }
7153 } else {
 7154 /* Fall back to INTx if MSI-X could not be enabled due to lack
 7155 of memory (in bnx2x_set_int_mode()) */
7156 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7157 bnx2x_enable_msi(bp);
7158 bnx2x_ack_int(bp);
7159 rc = bnx2x_req_irq(bp);
7160 if (rc) {
2dfe0e1f 7161 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7162 if (bp->flags & USING_MSI_FLAG)
7163 pci_disable_msi(bp->pdev);
2dfe0e1f 7164 goto load_error1;
a2fbb9ea 7165 }
7166 if (bp->flags & USING_MSI_FLAG) {
7167 bp->dev->irq = bp->pdev->irq;
7168 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7169 bp->dev->name, bp->pdev->irq);
7170 }
7171 }
7172
7173 /* Send LOAD_REQUEST command to MCP
7174 Returns the type of LOAD command:
7175 if it is the first port to be initialized
7176 common blocks should be initialized, otherwise - not
7177 */
7178 if (!BP_NOMCP(bp)) {
7179 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7180 if (!load_code) {
7181 BNX2X_ERR("MCP response failure, aborting\n");
7182 rc = -EBUSY;
7183 goto load_error2;
7184 }
7185 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7186 rc = -EBUSY; /* other port in diagnostic mode */
7187 goto load_error2;
7188 }
7189
7190 } else {
7191 int port = BP_PORT(bp);
7192
f5372251 7193 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7194 load_count[0], load_count[1], load_count[2]);
7195 load_count[0]++;
7196 load_count[1 + port]++;
f5372251 7197 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7198 load_count[0], load_count[1], load_count[2]);
7199 if (load_count[0] == 1)
7200 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7201 else if (load_count[1 + port] == 1)
7202 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7203 else
7204 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7205 }
7206
7207 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7208 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7209 bp->port.pmf = 1;
7210 else
7211 bp->port.pmf = 0;
7212 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7213
a2fbb9ea 7214 /* Initialize HW */
7215 rc = bnx2x_init_hw(bp, load_code);
7216 if (rc) {
a2fbb9ea 7217 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 7218 goto load_error2;
7219 }
7220
a2fbb9ea 7221 /* Setup NIC internals and enable interrupts */
471de716 7222 bnx2x_nic_init(bp, load_code);
a2fbb9ea 7223
7224 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7225 (bp->common.shmem2_base))
7226 SHMEM2_WR(bp, dcc_support,
7227 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7228 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7229
a2fbb9ea 7230 /* Send LOAD_DONE command to MCP */
34f80b04 7231 if (!BP_NOMCP(bp)) {
7232 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7233 if (!load_code) {
da5a662a 7234 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 7235 rc = -EBUSY;
2dfe0e1f 7236 goto load_error3;
7237 }
7238 }
7239
7240 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7241
7242 rc = bnx2x_setup_leading(bp);
7243 if (rc) {
da5a662a 7244 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 7245 goto load_error3;
34f80b04 7246 }
a2fbb9ea 7247
7248 if (CHIP_IS_E1H(bp))
7249 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 7250 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7251 bp->state = BNX2X_STATE_DISABLED;
7252 }
a2fbb9ea 7253
ca00392c 7254 if (bp->state == BNX2X_STATE_OPEN) {
7255 for_each_nondefault_queue(bp, i) {
7256 rc = bnx2x_setup_multi(bp, i);
7257 if (rc)
2dfe0e1f 7258 goto load_error3;
34f80b04 7259 }
a2fbb9ea 7260
7261 if (CHIP_IS_E1(bp))
7262 bnx2x_set_mac_addr_e1(bp, 1);
7263 else
7264 bnx2x_set_mac_addr_e1h(bp, 1);
7265 }
7266
7267 if (bp->port.pmf)
b5bf9068 7268 bnx2x_initial_phy_init(bp, load_mode);
7269
7270 /* Start fast path */
7271 switch (load_mode) {
7272 case LOAD_NORMAL:
7273 if (bp->state == BNX2X_STATE_OPEN) {
 7274 /* Tx queues need only be re-enabled */
7275 netif_tx_wake_all_queues(bp->dev);
7276 }
2dfe0e1f 7277 /* Initialize the receive filter. */
7278 bnx2x_set_rx_mode(bp->dev);
7279 break;
7280
7281 case LOAD_OPEN:
555f6c78 7282 netif_tx_start_all_queues(bp->dev);
7283 if (bp->state != BNX2X_STATE_OPEN)
7284 netif_tx_disable(bp->dev);
2dfe0e1f 7285 /* Initialize the receive filter. */
34f80b04 7286 bnx2x_set_rx_mode(bp->dev);
34f80b04 7287 break;
a2fbb9ea 7288
34f80b04 7289 case LOAD_DIAG:
2dfe0e1f 7290 /* Initialize the receive filter. */
a2fbb9ea 7291 bnx2x_set_rx_mode(bp->dev);
7292 bp->state = BNX2X_STATE_DIAG;
7293 break;
7294
7295 default:
7296 break;
7297 }
7298
7299 if (!bp->port.pmf)
7300 bnx2x__link_status_update(bp);
7301
7302 /* start the timer */
7303 mod_timer(&bp->timer, jiffies + bp->current_interval);
7304
34f80b04 7305
7306 return 0;
7307
7308load_error3:
7309 bnx2x_int_disable_sync(bp, 1);
7310 if (!BP_NOMCP(bp)) {
7311 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7312 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7313 }
7314 bp->port.pmf = 0;
7315 /* Free SKBs, SGEs, TPA pool and driver internals */
7316 bnx2x_free_skbs(bp);
555f6c78 7317 for_each_rx_queue(bp, i)
3196a88a 7318 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 7319load_error2:
7320 /* Release IRQs */
7321 bnx2x_free_irq(bp);
7322load_error1:
7323 bnx2x_napi_disable(bp);
555f6c78 7324 for_each_rx_queue(bp, i)
7cde1c8b 7325 netif_napi_del(&bnx2x_fp(bp, i, napi));
7326 bnx2x_free_mem(bp);
7327
34f80b04 7328 return rc;
7329}
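/* The error labels above unwind in reverse order of setup:
 * load_error3 reports the unload to the MCP and frees the SKB/SGE
 * pools, load_error2 releases the IRQs, and load_error1 tears down
 * NAPI and the memory from bnx2x_alloc_mem(). */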
7330
7331static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7332{
555f6c78 7333 struct bnx2x_fastpath *fp = &bp->fp[index];
7334 int rc;
7335
c14423fe 7336 /* halt the connection */
7337 fp->state = BNX2X_FP_STATE_HALTING;
7338 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7339
34f80b04 7340 /* Wait for completion */
a2fbb9ea 7341 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7342 &(fp->state), 1);
c14423fe 7343 if (rc) /* timeout */
7344 return rc;
7345
7346 /* delete cfc entry */
7347 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7348
7349 /* Wait for completion */
7350 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7351 &(fp->state), 1);
34f80b04 7352 return rc;
7353}
7354
da5a662a 7355static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7356{
4781bfad 7357 __le16 dsb_sp_prod_idx;
c14423fe 7358 /* if the other port is handling traffic,
a2fbb9ea 7359 this can take a lot of time */
7360 int cnt = 500;
7361 int rc;
7362
7363 might_sleep();
7364
7365 /* Send HALT ramrod */
7366 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7367 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7368
7369 /* Wait for completion */
7370 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7371 &(bp->fp[0].state), 1);
7372 if (rc) /* timeout */
da5a662a 7373 return rc;
a2fbb9ea 7374
49d66772 7375 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7376
228241eb 7377 /* Send PORT_DELETE ramrod */
7378 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7379
49d66772 7380 /* Wait for completion to arrive on default status block
7381 we are going to reset the chip anyway
7382 so there is not much to do if this times out
7383 */
34f80b04 7384 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7385 if (!cnt) {
7386 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7387 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7388 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7389#ifdef BNX2X_STOP_ON_ERROR
7390 bnx2x_panic();
7391#endif
36e552ab 7392 rc = -EBUSY;
7393 break;
7394 }
7395 cnt--;
da5a662a 7396 msleep(1);
5650d9d4 7397 rmb(); /* Refresh the dsb_sp_prod */
7398 }
7399 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7400 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7401
7402 return rc;
7403}
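/* Leading-queue teardown is two ramrods: HALT, whose completion is
 * tracked in fp[0].state, then PORT_DELETE, whose completion is seen
 * only as a bump of the default status block producer - the chip is
 * about to be reset anyway, so a timeout here is not fatal. */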
7404
7405static void bnx2x_reset_func(struct bnx2x *bp)
7406{
7407 int port = BP_PORT(bp);
7408 int func = BP_FUNC(bp);
7409 int base, i;
7410
7411 /* Configure IGU */
7412 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7413 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7414
7415 /* Clear ILT */
7416 base = FUNC_ILT_BASE(func);
7417 for (i = base; i < base + ILT_PER_FUNC; i++)
7418 bnx2x_ilt_wr(bp, i, 0);
7419}
7420
7421static void bnx2x_reset_port(struct bnx2x *bp)
7422{
7423 int port = BP_PORT(bp);
7424 u32 val;
7425
7426 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7427
7428 /* Do not rcv packets to BRB */
7429 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7430 /* Do not direct rcv packets that are not for MCP to the BRB */
7431 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7432 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7433
7434 /* Configure AEU */
7435 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7436
7437 msleep(100);
7438 /* Check for BRB port occupancy */
7439 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7440 if (val)
7441 DP(NETIF_MSG_IFDOWN,
33471629 7442 "BRB1 is not empty %d blocks are occupied\n", val);
7443
7444 /* TODO: Close Doorbell port? */
7445}
7446
7447static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7448{
7449 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7450 BP_FUNC(bp), reset_code);
7451
7452 switch (reset_code) {
7453 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7454 bnx2x_reset_port(bp);
7455 bnx2x_reset_func(bp);
7456 bnx2x_reset_common(bp);
7457 break;
7458
7459 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7460 bnx2x_reset_port(bp);
7461 bnx2x_reset_func(bp);
7462 break;
7463
7464 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7465 bnx2x_reset_func(bp);
7466 break;
49d66772 7467
7468 default:
7469 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7470 break;
7471 }
7472}
7473
33471629 7474/* must be called with rtnl_lock */
34f80b04 7475static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7476{
da5a662a 7477 int port = BP_PORT(bp);
a2fbb9ea 7478 u32 reset_code = 0;
da5a662a 7479 int i, cnt, rc;
7480
7481 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7482
7483 bp->rx_mode = BNX2X_RX_MODE_NONE;
7484 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7485
f8ef6e44 7486 bnx2x_netif_stop(bp, 1);
e94d8af3 7487
7488 del_timer_sync(&bp->timer);
7489 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7490 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7491 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7492
7493 /* Release IRQs */
7494 bnx2x_free_irq(bp);
7495
7496 /* Wait until tx fastpath tasks complete */
7497 for_each_tx_queue(bp, i) {
7498 struct bnx2x_fastpath *fp = &bp->fp[i];
7499
34f80b04 7500 cnt = 1000;
e8b5fc51 7501 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7502
7961f791 7503 bnx2x_tx_int(fp);
7504 if (!cnt) {
7505 BNX2X_ERR("timeout waiting for queue[%d]\n",
7506 i);
7507#ifdef BNX2X_STOP_ON_ERROR
7508 bnx2x_panic();
7509 return -EBUSY;
7510#else
7511 break;
7512#endif
7513 }
7514 cnt--;
da5a662a 7515 msleep(1);
34f80b04 7516 }
228241eb 7517 }
7518 /* Give HW time to discard old tx messages */
7519 msleep(1);
a2fbb9ea 7520
7521 if (CHIP_IS_E1(bp)) {
7522 struct mac_configuration_cmd *config =
7523 bnx2x_sp(bp, mcast_config);
7524
7525 bnx2x_set_mac_addr_e1(bp, 0);
7526
8d9c5f34 7527 for (i = 0; i < config->hdr.length; i++)
7528 CAM_INVALIDATE(config->config_table[i]);
7529
8d9c5f34 7530 config->hdr.length = i;
7531 if (CHIP_REV_IS_SLOW(bp))
7532 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7533 else
7534 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 7535 config->hdr.client_id = bp->fp->cl_id;
7536 config->hdr.reserved1 = 0;
7537
7538 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7539 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7540 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7541
7542 } else { /* E1H */
7543 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7544
7545 bnx2x_set_mac_addr_e1h(bp, 0);
7546
7547 for (i = 0; i < MC_HASH_SIZE; i++)
7548 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7549
7550 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7551 }
7552
7553 if (unload_mode == UNLOAD_NORMAL)
7554 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7555
7d0446c2 7556 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7557 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7558
7d0446c2 7559 else if (bp->wol) {
7560 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7561 u8 *mac_addr = bp->dev->dev_addr;
7562 u32 val;
7563 /* The mac address is written to entries 1-4 to
7564 preserve entry 0 which is used by the PMF */
7565 u8 entry = (BP_E1HVN(bp) + 1)*8;
7566
7567 val = (mac_addr[0] << 8) | mac_addr[1];
7568 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7569
7570 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7571 (mac_addr[4] << 8) | mac_addr[5];
7572 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7573
7574 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7575
7576 } else
7577 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7578
7579 /* Close multi and leading connections
7580 Completions for ramrods are collected in a synchronous way */
7581 for_each_nondefault_queue(bp, i)
7582 if (bnx2x_stop_multi(bp, i))
228241eb 7583 goto unload_error;
a2fbb9ea 7584
7585 rc = bnx2x_stop_leading(bp);
7586 if (rc) {
34f80b04 7587 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7588#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7589 return -EBUSY;
7590#else
7591 goto unload_error;
34f80b04 7592#endif
7593 }
7594
7595unload_error:
34f80b04 7596 if (!BP_NOMCP(bp))
228241eb 7597 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 7598 else {
f5372251 7599 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7600 load_count[0], load_count[1], load_count[2]);
7601 load_count[0]--;
da5a662a 7602 load_count[1 + port]--;
f5372251 7603 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7604 load_count[0], load_count[1], load_count[2]);
7605 if (load_count[0] == 0)
7606 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7607 else if (load_count[1 + port] == 0)
7608 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7609 else
7610 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7611 }
a2fbb9ea 7612
7613 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7614 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7615 bnx2x__link_reset(bp);
7616
7617 /* Reset the chip */
228241eb 7618 bnx2x_reset_chip(bp, reset_code);
7619
7620 /* Report UNLOAD_DONE to MCP */
34f80b04 7621 if (!BP_NOMCP(bp))
a2fbb9ea 7622 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 7623
9a035440 7624 bp->port.pmf = 0;
a2fbb9ea 7625
7a9b2557 7626 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7627 bnx2x_free_skbs(bp);
555f6c78 7628 for_each_rx_queue(bp, i)
3196a88a 7629 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7630 for_each_rx_queue(bp, i)
7cde1c8b 7631 netif_napi_del(&bnx2x_fp(bp, i, napi));
7632 bnx2x_free_mem(bp);
7633
7634 bp->state = BNX2X_STATE_CLOSED;
228241eb 7635
7636 netif_carrier_off(bp->dev);
7637
7638 return 0;
7639}
7640
7641static void bnx2x_reset_task(struct work_struct *work)
7642{
7643 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7644
7645#ifdef BNX2X_STOP_ON_ERROR
7646 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7647 " so reset not done to allow debug dump,\n"
ad361c98 7648 " you will need to reboot when done\n");
7649 return;
7650#endif
7651
7652 rtnl_lock();
7653
7654 if (!netif_running(bp->dev))
7655 goto reset_task_exit;
7656
7657 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7658 bnx2x_nic_load(bp, LOAD_NORMAL);
7659
7660reset_task_exit:
7661 rtnl_unlock();
7662}
7663
7664/* end of nic load/unload */
7665
7666/* ethtool_ops */
7667
7668/*
7669 * Init service functions
7670 */
7671
7672static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7673{
7674 switch (func) {
7675 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7676 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7677 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7678 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7679 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7680 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7681 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7682 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7683 default:
7684 BNX2X_ERR("Unsupported function index: %d\n", func);
7685 return (u32)(-1);
7686 }
7687}
7688
7689static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7690{
7691 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7692
7693 /* Flush all outstanding writes */
7694 mmiowb();
7695
7696 /* Pretend to be function 0 */
7697 REG_WR(bp, reg, 0);
7698 /* Flush the GRC transaction (in the chip) */
7699 new_val = REG_RD(bp, reg);
7700 if (new_val != 0) {
7701 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7702 new_val);
7703 BUG();
7704 }
7705
7706 /* From now we are in the "like-E1" mode */
7707 bnx2x_int_disable(bp);
7708
7709 /* Flush all outstanding writes */
7710 mmiowb();
7711
 7712 /* Restore the original function settings */
7713 REG_WR(bp, reg, orig_func);
7714 new_val = REG_RD(bp, reg);
7715 if (new_val != orig_func) {
7716 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7717 orig_func, new_val);
7718 BUG();
7719 }
7720}
7721
7722static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7723{
7724 if (CHIP_IS_E1H(bp))
7725 bnx2x_undi_int_disable_e1h(bp, func);
7726 else
7727 bnx2x_int_disable(bp);
7728}
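/* The "pretend" register makes GRC accesses by this PCI function
 * appear to come from function 0, which is what lets an E1H function
 * disable the interrupt state the UNDI driver left behind; each write
 * is read back to flush the GRC transaction and verify the change. */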
7729
7730static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7731{
7732 u32 val;
7733
7734 /* Check if there is any driver already loaded */
7735 val = REG_RD(bp, MISC_REG_UNPREPARED);
7736 if (val == 0x1) {
7737 /* Check if it is the UNDI driver
7738 * UNDI driver initializes CID offset for normal bell to 0x7
7739 */
4a37fb66 7740 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7741 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7742 if (val == 0x7) {
7743 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7744 /* save our func */
34f80b04 7745 int func = BP_FUNC(bp);
7746 u32 swap_en;
7747 u32 swap_val;
34f80b04 7748
7749 /* clear the UNDI indication */
7750 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7751
7752 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7753
7754 /* try unload UNDI on port 0 */
7755 bp->func = 0;
7756 bp->fw_seq =
7757 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7758 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7759 reset_code = bnx2x_fw_command(bp, reset_code);
7760
7761 /* if UNDI is loaded on the other port */
7762 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7763
7764 /* send "DONE" for previous unload */
7765 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7766
7767 /* unload UNDI on port 1 */
34f80b04 7768 bp->func = 1;
7769 bp->fw_seq =
7770 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7771 DRV_MSG_SEQ_NUMBER_MASK);
7772 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7773
7774 bnx2x_fw_command(bp, reset_code);
7775 }
7776
7777 /* now it's safe to release the lock */
7778 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7779
f1ef27ef 7780 bnx2x_undi_int_disable(bp, func);
7781
7782 /* close input traffic and wait for it */
7783 /* Do not rcv packets to BRB */
7784 REG_WR(bp,
7785 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7786 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7787 /* Do not direct rcv packets that are not for MCP to
7788 * the BRB */
7789 REG_WR(bp,
7790 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7791 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7792 /* clear AEU */
7793 REG_WR(bp,
7794 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7795 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7796 msleep(10);
7797
7798 /* save NIG port swap info */
7799 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7800 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7801 /* reset device */
7802 REG_WR(bp,
7803 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7804 0xd3ffffff);
7805 REG_WR(bp,
7806 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7807 0x1403);
7808 /* take the NIG out of reset and restore swap values */
7809 REG_WR(bp,
7810 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7811 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7812 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7813 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7814
7815 /* send unload done to the MCP */
7816 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7817
7818 /* restore our func and fw_seq */
7819 bp->func = func;
7820 bp->fw_seq =
7821 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7822 DRV_MSG_SEQ_NUMBER_MASK);
7823
7824 } else
7825 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7826 }
7827}
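/* UNDI (PXE) detection above relies on two breadcrumbs: the
 * MISC_REG_UNPREPARED flag and the doorbell CID offset of 0x7 that
 * only the UNDI driver programs.  Recovery runs the normal unload
 * handshake with the MCP (for both ports if needed), quiesces the
 * BRB, resets the chip while preserving the NIG port-swap straps,
 * and finally restores this function's MCP sequence number. */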
7828
7829static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7830{
7831 u32 val, val2, val3, val4, id;
72ce58c3 7832 u16 pmc;
7833
7834 /* Get the chip revision id and number. */
7835 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7836 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7837 id = ((val & 0xffff) << 16);
7838 val = REG_RD(bp, MISC_REG_CHIP_REV);
7839 id |= ((val & 0xf) << 12);
7840 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7841 id |= ((val & 0xff) << 4);
5a40e08e 7842 val = REG_RD(bp, MISC_REG_BOND_ID);
7843 id |= (val & 0xf);
7844 bp->common.chip_id = id;
7845 bp->link_params.chip_id = bp->common.chip_id;
7846 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7847
7848 val = (REG_RD(bp, 0x2874) & 0x55);
7849 if ((bp->common.chip_id & 0x1) ||
7850 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7851 bp->flags |= ONE_PORT_FLAG;
7852 BNX2X_DEV_INFO("single port device\n");
7853 }
7854
7855 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7856 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7857 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7858 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7859 bp->common.flash_size, bp->common.flash_size);
7860
7861 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 7862 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 7863 bp->link_params.shmem_base = bp->common.shmem_base;
7864 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7865 bp->common.shmem_base, bp->common.shmem2_base);
7866
7867 if (!bp->common.shmem_base ||
7868 (bp->common.shmem_base < 0xA0000) ||
7869 (bp->common.shmem_base >= 0xC0000)) {
7870 BNX2X_DEV_INFO("MCP not active\n");
7871 bp->flags |= NO_MCP_FLAG;
7872 return;
7873 }
7874
7875 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7876 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7877 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7878 BNX2X_ERR("BAD MCP validity signature\n");
7879
7880 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7881 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7882
7883 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7884 SHARED_HW_CFG_LED_MODE_MASK) >>
7885 SHARED_HW_CFG_LED_MODE_SHIFT);
7886
7887 bp->link_params.feature_config_flags = 0;
7888 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7889 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7890 bp->link_params.feature_config_flags |=
7891 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7892 else
7893 bp->link_params.feature_config_flags &=
7894 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7895
7896 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7897 bp->common.bc_ver = val;
7898 BNX2X_DEV_INFO("bc_ver %X\n", val);
7899 if (val < BNX2X_BC_VER) {
7900 /* for now only warn
7901 * later we might need to enforce this */
7902 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7903 " please upgrade BC\n", BNX2X_BC_VER, val);
7904 }
7905 bp->link_params.feature_config_flags |=
7906 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
7907 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7908
7909 if (BP_E1HVN(bp) == 0) {
7910 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7911 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7912 } else {
7913 /* no WOL capability for E1HVN != 0 */
7914 bp->flags |= NO_WOL_FLAG;
7915 }
7916 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 7917 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7918
7919 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7920 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7921 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7922 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7923
7924 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7925 val, val2, val3, val4);
7926}
7927
7928static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7929 u32 switch_cfg)
a2fbb9ea 7930{
34f80b04 7931 int port = BP_PORT(bp);
7932 u32 ext_phy_type;
7933
7934 switch (switch_cfg) {
7935 case SWITCH_CFG_1G:
7936 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7937
7938 ext_phy_type =
7939 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7940 switch (ext_phy_type) {
7941 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7942 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7943 ext_phy_type);
7944
7945 bp->port.supported |= (SUPPORTED_10baseT_Half |
7946 SUPPORTED_10baseT_Full |
7947 SUPPORTED_100baseT_Half |
7948 SUPPORTED_100baseT_Full |
7949 SUPPORTED_1000baseT_Full |
7950 SUPPORTED_2500baseX_Full |
7951 SUPPORTED_TP |
7952 SUPPORTED_FIBRE |
7953 SUPPORTED_Autoneg |
7954 SUPPORTED_Pause |
7955 SUPPORTED_Asym_Pause);
7956 break;
7957
7958 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7959 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7960 ext_phy_type);
7961
7962 bp->port.supported |= (SUPPORTED_10baseT_Half |
7963 SUPPORTED_10baseT_Full |
7964 SUPPORTED_100baseT_Half |
7965 SUPPORTED_100baseT_Full |
7966 SUPPORTED_1000baseT_Full |
7967 SUPPORTED_TP |
7968 SUPPORTED_FIBRE |
7969 SUPPORTED_Autoneg |
7970 SUPPORTED_Pause |
7971 SUPPORTED_Asym_Pause);
7972 break;
7973
7974 default:
7975 BNX2X_ERR("NVRAM config error. "
7976 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7977 bp->link_params.ext_phy_config);
7978 return;
7979 }
7980
7981 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7982 port*0x10);
7983 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7984 break;
7985
7986 case SWITCH_CFG_10G:
7987 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7988
7989 ext_phy_type =
7990 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7991 switch (ext_phy_type) {
7992 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7993 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7994 ext_phy_type);
7995
7996 bp->port.supported |= (SUPPORTED_10baseT_Half |
7997 SUPPORTED_10baseT_Full |
7998 SUPPORTED_100baseT_Half |
7999 SUPPORTED_100baseT_Full |
8000 SUPPORTED_1000baseT_Full |
8001 SUPPORTED_2500baseX_Full |
8002 SUPPORTED_10000baseT_Full |
8003 SUPPORTED_TP |
8004 SUPPORTED_FIBRE |
8005 SUPPORTED_Autoneg |
8006 SUPPORTED_Pause |
8007 SUPPORTED_Asym_Pause);
8008 break;
8009
8010 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8011 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 8012 ext_phy_type);
f1410647 8013
34f80b04 8014 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8015 SUPPORTED_1000baseT_Full |
34f80b04 8016 SUPPORTED_FIBRE |
589abe3a 8017 SUPPORTED_Autoneg |
8018 SUPPORTED_Pause |
8019 SUPPORTED_Asym_Pause);
8020 break;
8021
8022 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8023 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8024 ext_phy_type);
8025
34f80b04 8026 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8027 SUPPORTED_2500baseX_Full |
34f80b04 8028 SUPPORTED_1000baseT_Full |
8029 SUPPORTED_FIBRE |
8030 SUPPORTED_Autoneg |
8031 SUPPORTED_Pause |
8032 SUPPORTED_Asym_Pause);
8033 break;
8034
8035 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8036 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8037 ext_phy_type);
8038
8039 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8040 SUPPORTED_FIBRE |
8041 SUPPORTED_Pause |
8042 SUPPORTED_Asym_Pause);
8043 break;
8044
8045 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8046 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8047 ext_phy_type);
8048
8049 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8050 SUPPORTED_1000baseT_Full |
8051 SUPPORTED_FIBRE |
8052 SUPPORTED_Pause |
8053 SUPPORTED_Asym_Pause);
8054 break;
8055
8056 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8057 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8058 ext_phy_type);
8059
34f80b04 8060 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 8061 SUPPORTED_1000baseT_Full |
34f80b04 8062 SUPPORTED_Autoneg |
589abe3a 8063 SUPPORTED_FIBRE |
8064 SUPPORTED_Pause |
8065 SUPPORTED_Asym_Pause);
8066 break;
8067
8068 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8069 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8070 ext_phy_type);
8071
8072 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8073 SUPPORTED_1000baseT_Full |
8074 SUPPORTED_Autoneg |
8075 SUPPORTED_FIBRE |
8076 SUPPORTED_Pause |
8077 SUPPORTED_Asym_Pause);
8078 break;
8079
8080 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8081 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8082 ext_phy_type);
8083
8084 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8085 SUPPORTED_TP |
8086 SUPPORTED_Autoneg |
8087 SUPPORTED_Pause |
8088 SUPPORTED_Asym_Pause);
8089 break;
8090
8091 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8092 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8093 ext_phy_type);
8094
8095 bp->port.supported |= (SUPPORTED_10baseT_Half |
8096 SUPPORTED_10baseT_Full |
8097 SUPPORTED_100baseT_Half |
8098 SUPPORTED_100baseT_Full |
8099 SUPPORTED_1000baseT_Full |
8100 SUPPORTED_10000baseT_Full |
8101 SUPPORTED_TP |
8102 SUPPORTED_Autoneg |
8103 SUPPORTED_Pause |
8104 SUPPORTED_Asym_Pause);
8105 break;
8106
8107 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8108 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8109 bp->link_params.ext_phy_config);
8110 break;
8111
8112 default:
8113 BNX2X_ERR("NVRAM config error. "
8114 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 8115 bp->link_params.ext_phy_config);
8116 return;
8117 }
8118
8119 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8120 port*0x18);
8121 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 8122
8123 break;
8124
8125 default:
8126 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 8127 bp->port.link_config);
8128 return;
8129 }
34f80b04 8130 bp->link_params.phy_addr = bp->port.phy_addr;
8131
8132 /* mask what we support according to speed_cap_mask */
8133 if (!(bp->link_params.speed_cap_mask &
8134 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 8135 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8136
8137 if (!(bp->link_params.speed_cap_mask &
8138 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 8139 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8140
8141 if (!(bp->link_params.speed_cap_mask &
8142 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 8143 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8144
8145 if (!(bp->link_params.speed_cap_mask &
8146 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 8147 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8148
8149 if (!(bp->link_params.speed_cap_mask &
8150 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8151 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8152 SUPPORTED_1000baseT_Full);
a2fbb9ea 8153
8154 if (!(bp->link_params.speed_cap_mask &
8155 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 8156 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8157
8158 if (!(bp->link_params.speed_cap_mask &
8159 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 8160 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 8161
34f80b04 8162 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8163}
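/* Two-stage capability derivation: the external PHY type seeds the
 * SUPPORTED_* mask, then the NVRAM speed_cap_mask strips whatever the
 * board does not wire up, so bp->port.supported ends up as the
 * intersection of PHY and board capabilities. */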
8164
34f80b04 8165static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8166{
c18487ee 8167 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 8168
34f80b04 8169 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 8170 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 8171 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 8172 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8173 bp->port.advertising = bp->port.supported;
a2fbb9ea 8174 } else {
8175 u32 ext_phy_type =
8176 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8177
8178 if ((ext_phy_type ==
8179 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8180 (ext_phy_type ==
8181 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 8182 /* force 10G, no AN */
c18487ee 8183 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 8184 bp->port.advertising =
8185 (ADVERTISED_10000baseT_Full |
8186 ADVERTISED_FIBRE);
8187 break;
8188 }
8189 BNX2X_ERR("NVRAM config error. "
8190 "Invalid link_config 0x%x"
8191 " Autoneg not supported\n",
34f80b04 8192 bp->port.link_config);
8193 return;
8194 }
8195 break;
8196
8197 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 8198 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 8199 bp->link_params.req_line_speed = SPEED_10;
8200 bp->port.advertising = (ADVERTISED_10baseT_Full |
8201 ADVERTISED_TP);
8202 } else {
8203 BNX2X_ERR("NVRAM config error. "
8204 "Invalid link_config 0x%x"
8205 " speed_cap_mask 0x%x\n",
34f80b04 8206 bp->port.link_config,
c18487ee 8207 bp->link_params.speed_cap_mask);
8208 return;
8209 }
8210 break;
8211
8212 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 8213 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8214 bp->link_params.req_line_speed = SPEED_10;
8215 bp->link_params.req_duplex = DUPLEX_HALF;
8216 bp->port.advertising = (ADVERTISED_10baseT_Half |
8217 ADVERTISED_TP);
8218 } else {
8219 BNX2X_ERR("NVRAM config error. "
8220 "Invalid link_config 0x%x"
8221 " speed_cap_mask 0x%x\n",
34f80b04 8222 bp->port.link_config,
c18487ee 8223 bp->link_params.speed_cap_mask);
8224 return;
8225 }
8226 break;
8227
8228 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 8229 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 8230 bp->link_params.req_line_speed = SPEED_100;
8231 bp->port.advertising = (ADVERTISED_100baseT_Full |
8232 ADVERTISED_TP);
8233 } else {
8234 BNX2X_ERR("NVRAM config error. "
8235 "Invalid link_config 0x%x"
8236 " speed_cap_mask 0x%x\n",
34f80b04 8237 bp->port.link_config,
c18487ee 8238 bp->link_params.speed_cap_mask);
8239 return;
8240 }
8241 break;
8242
8243 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 8244 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8245 bp->link_params.req_line_speed = SPEED_100;
8246 bp->link_params.req_duplex = DUPLEX_HALF;
8247 bp->port.advertising = (ADVERTISED_100baseT_Half |
8248 ADVERTISED_TP);
8249 } else {
8250 BNX2X_ERR("NVRAM config error. "
8251 "Invalid link_config 0x%x"
8252 " speed_cap_mask 0x%x\n",
34f80b04 8253 bp->port.link_config,
c18487ee 8254 bp->link_params.speed_cap_mask);
8255 return;
8256 }
8257 break;
8258
8259 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 8260 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 8261 bp->link_params.req_line_speed = SPEED_1000;
8262 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8263 ADVERTISED_TP);
8264 } else {
8265 BNX2X_ERR("NVRAM config error. "
8266 "Invalid link_config 0x%x"
8267 " speed_cap_mask 0x%x\n",
34f80b04 8268 bp->port.link_config,
c18487ee 8269 bp->link_params.speed_cap_mask);
8270 return;
8271 }
8272 break;
8273
8274 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8275 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8276 bp->link_params.req_line_speed = SPEED_2500;
8277 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8278 ADVERTISED_TP);
8279 } else {
8280 BNX2X_ERR("NVRAM config error. "
8281 "Invalid link_config 0x%x"
8282 " speed_cap_mask 0x%x\n",
34f80b04 8283 bp->port.link_config,
c18487ee 8284 bp->link_params.speed_cap_mask);
8285 return;
8286 }
8287 break;
8288
8289 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8290 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8291 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8292 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8293 bp->link_params.req_line_speed = SPEED_10000;
8294 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8295 ADVERTISED_FIBRE);
8296 } else {
8297 BNX2X_ERR("NVRAM config error. "
8298 "Invalid link_config 0x%x"
8299 " speed_cap_mask 0x%x\n",
34f80b04 8300 bp->port.link_config,
c18487ee 8301 bp->link_params.speed_cap_mask);
8302 return;
8303 }
8304 break;
8305
8306 default:
8307 BNX2X_ERR("NVRAM config error. "
8308 "BAD link speed link_config 0x%x\n",
34f80b04 8309 bp->port.link_config);
c18487ee 8310 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8311 bp->port.advertising = bp->port.supported;
8312 break;
8313 }
a2fbb9ea 8314
8315 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8316 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8317 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8318 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8319 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8320
c18487ee 8321 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8322 " advertising 0x%x\n",
8323 bp->link_params.req_line_speed,
8324 bp->link_params.req_duplex,
34f80b04 8325 bp->link_params.req_flow_ctrl, bp->port.advertising);
8326}
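/* Every fixed-speed case above follows the same pattern: accept the
 * NVRAM-requested speed only if it survived the supported-mask
 * filtering, otherwise log an NVRAM config error and return; an
 * unknown setting falls back to autoneg with everything advertised. */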
8327
34f80b04 8328static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8329{
8330 int port = BP_PORT(bp);
8331 u32 val, val2;
589abe3a 8332 u32 config;
c2c8b03e 8333 u16 i;
a2fbb9ea 8334
c18487ee 8335 bp->link_params.bp = bp;
34f80b04 8336 bp->link_params.port = port;
c18487ee 8337
c18487ee 8338 bp->link_params.lane_config =
a2fbb9ea 8339 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8340 bp->link_params.ext_phy_config =
8341 SHMEM_RD(bp,
8342 dev_info.port_hw_config[port].external_phy_config);
8343 /* BCM8727_NOC => BCM8727 no over current */
8344 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8345 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8346 bp->link_params.ext_phy_config &=
8347 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8348 bp->link_params.ext_phy_config |=
8349 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8350 bp->link_params.feature_config_flags |=
8351 FEATURE_CONFIG_BCM8727_NOC;
8352 }
8353
c18487ee 8354 bp->link_params.speed_cap_mask =
8355 SHMEM_RD(bp,
8356 dev_info.port_hw_config[port].speed_capability_mask);
8357
34f80b04 8358 bp->port.link_config =
8359 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8360
8361 /* Get the 4 lanes xgxs config rx and tx */
8362 for (i = 0; i < 2; i++) {
8363 val = SHMEM_RD(bp,
8364 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8365 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8366 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8367
8368 val = SHMEM_RD(bp,
8369 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8370 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8371 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8372 }
8373
3ce2c3f9
EG
8374 /* If the device is capable of WoL, set the default state according
8375 * to the HW
8376 */
4d295db0 8377 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8378 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8379 (config & PORT_FEATURE_WOL_ENABLED));
8380
c2c8b03e
EG
8381 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8382 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
8383 bp->link_params.lane_config,
8384 bp->link_params.ext_phy_config,
34f80b04 8385 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8386
4d295db0
EG
8387 bp->link_params.switch_cfg |= (bp->port.link_config &
8388 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8389 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8390
8391 bnx2x_link_settings_requested(bp);
8392
8393 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8394 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8395 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8396 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8397 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8398 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8399 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8400 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
8401 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8402 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
8403}
8404
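/* Function-level HW info.  On E1H the presence of a valid outer-VLAN
 * (E1HOV) tag on function 0 marks multi-function mode; in that mode each
 * function also gets its own E1HOV tag and MAC address from the MCP.
 */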
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  VN %d in single function mode,"
					  "  aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

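/* One-time driver-private initialization: read the HW configuration,
 * undo a chip left active by UNDI, and set the default ring sizes,
 * coalescing parameters and the periodic timer.
 */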
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

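/* In multi-function (E1HMF) mode the link is owned by the MCP, so the
 * requests below are accepted but silently ignored.
 */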
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

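/* The register dump is sized from the tables in bnx2x_dump.h: plain
 * entries contribute 'size' dwords each, while indirect (wreg) entries
 * expand to the register itself plus its read_regs_count side reads.
 * The result depends only on the chip type, so it is computed once.
 */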
#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	static u32 regdump_len;
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (regdump_len)
		return regdump_len;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

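/* NVRAM access protocol: take the per-port SW arbitration bit in
 * MCP_REG_MCPR_NVM_SW_ARB, enable the interface, issue dword commands
 * flagged FIRST/LAST as appropriate and poll for the DONE bit.
 */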
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

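/* Read a dword-aligned region by chaining read commands; only the first
 * and last dwords of the burst carry the FIRST/LAST flags.
 */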
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

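/* Multi-dword writes must respect the NVRAM page boundaries: LAST is
 * raised on the final dword of the buffer or of a page, and FIRST is
 * raised again when a new page starts.
 */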
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars, 1);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

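/* TPA (LRO) depends on Rx checksum offload: LRO is only enabled while
 * Rx CSUM is on, and turning Rx CSUM off force-disables LRO so that
 * aggregated packets are not dropped on bad TCP checksums.
 */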
9512
df0f2343
VZ
9513static int bnx2x_set_flags(struct net_device *dev, u32 data)
9514{
9515 struct bnx2x *bp = netdev_priv(dev);
9516 int changed = 0;
9517 int rc = 0;
9518
9519 /* TPA requires Rx CSUM offloading */
9520 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9521 if (!(dev->features & NETIF_F_LRO)) {
9522 dev->features |= NETIF_F_LRO;
9523 bp->flags |= TPA_ENABLE_FLAG;
9524 changed = 1;
9525 }
9526
9527 } else if (dev->features & NETIF_F_LRO) {
9528 dev->features &= ~NETIF_F_LRO;
9529 bp->flags &= ~TPA_ENABLE_FLAG;
9530 changed = 1;
9531 }
9532
9533 if (changed && netif_running(dev)) {
9534 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9535 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9536 }
9537
9538 return rc;
9539}
9540
a2fbb9ea
ET
9541static u32 bnx2x_get_rx_csum(struct net_device *dev)
9542{
9543 struct bnx2x *bp = netdev_priv(dev);
9544
9545 return bp->rx_csum;
9546}
9547
9548static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9549{
9550 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9551 int rc = 0;
a2fbb9ea
ET
9552
9553 bp->rx_csum = data;
df0f2343
VZ
9554
9555 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9556 TPA'ed packets will be discarded due to wrong TCP CSUM */
9557 if (!data) {
9558 u32 flags = ethtool_op_get_flags(dev);
9559
9560 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9561 }
9562
9563 return rc;
a2fbb9ea
ET
9564}
9565
9566static int bnx2x_set_tso(struct net_device *dev, u32 data)
9567{
755735eb 9568 if (data) {
a2fbb9ea 9569 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9570 dev->features |= NETIF_F_TSO6;
9571 } else {
a2fbb9ea 9572 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9573 dev->features &= ~NETIF_F_TSO6;
9574 }
9575
a2fbb9ea
ET
9576 return 0;
9577}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

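/* The register self-test writes 0x00000000 and then 0xffffffff to a
 * table of per-port registers, reads each value back through the given
 * mask and restores the original contents.
 */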
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that the value read back is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

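/* The loopback test builds a single frame addressed to ourselves, sends
 * it through the regular Tx BD ring and checks that it comes back on Rx
 * queue 0 with the expected length and payload pattern.
 */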
static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

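/* The interrupt test posts an empty SET_MAC ramrod on the slowpath and
 * waits for its completion, proving that the status block update and
 * slowpath interrupt path are alive.
 */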
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
10082
de832a55
EG
10083static const struct {
10084 long offset;
10085 int size;
10086 u8 string[ETH_GSTRING_LEN];
10087} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10088/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10089 { Q_STATS_OFFSET32(error_bytes_received_hi),
10090 8, "[%d]: rx_error_bytes" },
10091 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10092 8, "[%d]: rx_ucast_packets" },
10093 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10094 8, "[%d]: rx_mcast_packets" },
10095 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10096 8, "[%d]: rx_bcast_packets" },
10097 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10098 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10099 4, "[%d]: rx_phy_ip_err_discards"},
10100 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10101 4, "[%d]: rx_skb_alloc_discard" },
10102 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10103
10104/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10105 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10106 8, "[%d]: tx_packets" }
10107};
10108
bb2a0f7a
YG
10109static const struct {
10110 long offset;
10111 int size;
10112 u32 flags;
66e855f3
YG
10113#define STATS_FLAGS_PORT 1
10114#define STATS_FLAGS_FUNC 2
de832a55 10115#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 10116 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 10117} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
10118/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10119 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 10120 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 10121 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 10122 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 10123 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 10124 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 10125 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 10126 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 10127 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 10128 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 10129 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 10130 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 10131 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
10132 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10133 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10134 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10135 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10136/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10137 8, STATS_FLAGS_PORT, "rx_fragments" },
10138 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10139 8, STATS_FLAGS_PORT, "rx_jabbers" },
10140 { STATS_OFFSET32(no_buff_discard_hi),
10141 8, STATS_FLAGS_BOTH, "rx_discards" },
10142 { STATS_OFFSET32(mac_filter_discard),
10143 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10144 { STATS_OFFSET32(xxoverflow_discard),
10145 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10146 { STATS_OFFSET32(brb_drop_hi),
10147 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10148 { STATS_OFFSET32(brb_truncate_hi),
10149 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10150 { STATS_OFFSET32(pause_frames_received_hi),
10151 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10152 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10153 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10154 { STATS_OFFSET32(nig_timer_max),
10155 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10156/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10157 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10158 { STATS_OFFSET32(rx_skb_alloc_failed),
10159 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10160 { STATS_OFFSET32(hw_csum_err),
10161 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10162
10163 { STATS_OFFSET32(total_bytes_transmitted_hi),
10164 8, STATS_FLAGS_BOTH, "tx_bytes" },
10165 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10166 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10167 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10168 8, STATS_FLAGS_BOTH, "tx_packets" },
10169 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10170 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10171 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10172 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10173 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10174 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10175 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10176 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10177/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10178 8, STATS_FLAGS_PORT, "tx_deferred" },
10179 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10180 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10181 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10182 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10183 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10184 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10185 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10186 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10187 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10188 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10189 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10190 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10191 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10192 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10193 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10194 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10195 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10196 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10197/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10198 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10199 { STATS_OFFSET32(pause_frames_sent_hi),
10200 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10201};
10202
10203#define IS_PORT_STAT(i) \
10204 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10205#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10206#define IS_E1HMF_MODE_STAT(bp) \
10207 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10208
10209static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10210{
10211 struct bnx2x *bp = netdev_priv(dev);
10212 int i, j, k;
10213
10214 switch (stringset) {
10215 case ETH_SS_STATS:
10216 if (is_multi(bp)) {
10217 k = 0;
10218 for_each_rx_queue(bp, i) {
10219 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10220 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10221 bnx2x_q_stats_arr[j].string, i);
10222 k += BNX2X_NUM_Q_STATS;
10223 }
10224 if (IS_E1HMF_MODE_STAT(bp))
10225 break;
10226 for (j = 0; j < BNX2X_NUM_STATS; j++)
10227 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10228 bnx2x_stats_arr[j].string);
10229 } else {
10230 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10231 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10232 continue;
10233 strcpy(buf + j*ETH_GSTRING_LEN,
10234 bnx2x_stats_arr[i].string);
10235 j++;
10236 }
10237 }
10238 break;
10239
10240 case ETH_SS_TEST:
10241 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10242 break;
10243 }
10244}
10245
10246static int bnx2x_get_stats_count(struct net_device *dev)
10247{
10248 struct bnx2x *bp = netdev_priv(dev);
10249 int i, num_stats;
10250
10251 if (is_multi(bp)) {
10252 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10253 if (!IS_E1HMF_MODE_STAT(bp))
10254 num_stats += BNX2X_NUM_STATS;
10255 } else {
10256 if (IS_E1HMF_MODE_STAT(bp)) {
10257 num_stats = 0;
10258 for (i = 0; i < BNX2X_NUM_STATS; i++)
10259 if (IS_FUNC_STAT(i))
10260 num_stats++;
10261 } else
10262 num_stats = BNX2X_NUM_STATS;
10263 }
10264
10265 return num_stats;
10266}
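/* Annotation (illustrative, not part of the original driver): in
 * multi-queue mode the count computed above is BNX2X_NUM_Q_STATS per Rx
 * queue plus the global BNX2X_NUM_STATS (unless E1H MF mode hides the
 * port stats).  For example, a hypothetical 4-queue setup with the 11
 * per-queue stats listed in bnx2x_q_stats_arr[] contributes
 * 4 * 11 = 44 entries before the global stats, matching the string
 * layout built by bnx2x_get_strings().
 */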
10267
10268static void bnx2x_get_ethtool_stats(struct net_device *dev,
10269 struct ethtool_stats *stats, u64 *buf)
10270{
10271 struct bnx2x *bp = netdev_priv(dev);
10272 u32 *hw_stats, *offset;
10273 int i, j, k;
10274
10275 if (is_multi(bp)) {
10276 k = 0;
10277 for_each_rx_queue(bp, i) {
10278 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10279 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10280 if (bnx2x_q_stats_arr[j].size == 0) {
10281 /* skip this counter */
10282 buf[k + j] = 0;
10283 continue;
10284 }
10285 offset = (hw_stats +
10286 bnx2x_q_stats_arr[j].offset);
10287 if (bnx2x_q_stats_arr[j].size == 4) {
10288 /* 4-byte counter */
10289 buf[k + j] = (u64) *offset;
10290 continue;
10291 }
10292 /* 8-byte counter */
10293 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10294 }
10295 k += BNX2X_NUM_Q_STATS;
10296 }
10297 if (IS_E1HMF_MODE_STAT(bp))
10298 return;
10299 hw_stats = (u32 *)&bp->eth_stats;
10300 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10301 if (bnx2x_stats_arr[j].size == 0) {
10302 /* skip this counter */
10303 buf[k + j] = 0;
10304 continue;
10305 }
10306 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10307 if (bnx2x_stats_arr[j].size == 4) {
10308 /* 4-byte counter */
10309 buf[k + j] = (u64) *offset;
10310 continue;
10311 }
10312 /* 8-byte counter */
10313 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10314 }
10315 } else {
10316 hw_stats = (u32 *)&bp->eth_stats;
10317 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10318 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10319 continue;
10320 if (bnx2x_stats_arr[i].size == 0) {
10321 /* skip this counter */
10322 buf[j] = 0;
10323 j++;
10324 continue;
10325 }
10326 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10327 if (bnx2x_stats_arr[i].size == 4) {
10328 /* 4-byte counter */
10329 buf[j] = (u64) *offset;
10330 j++;
10331 continue;
10332 }
10333 /* 8-byte counter */
10334 buf[j] = HILO_U64(*offset, *(offset + 1));
10335 j++;
10336 }
10337 }
10338}
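/* Annotation (illustrative, not part of the original driver): 8-byte
 * counters are stored as two 32-bit words, high word first, so
 * HILO_U64(*offset, *(offset + 1)) assembles roughly:
 *
 *	u64 val = ((u64)hi << 32) + lo;
 *
 * e.g. hi = 0x00000001, lo = 0x00000002 yields 0x0000000100000002.
 */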
10339
10340static int bnx2x_phys_id(struct net_device *dev, u32 data)
10341{
10342 struct bnx2x *bp = netdev_priv(dev);
10343 int port = BP_PORT(bp);
10344 int i;
10345
10346 if (!netif_running(dev))
10347 return 0;
10348
10349 if (!bp->port.pmf)
10350 return 0;
10351
10352 if (data == 0)
10353 data = 2;
10354
10355 for (i = 0; i < (data * 2); i++) {
10356 if ((i % 2) == 0)
10357 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10358 bp->link_params.hw_led_mode,
10359 bp->link_params.chip_id);
10360 else
10361 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10362 bp->link_params.hw_led_mode,
10363 bp->link_params.chip_id);
10364
10365 msleep_interruptible(500);
10366 if (signal_pending(current))
10367 break;
10368 }
10369
10370 if (bp->link_vars.link_up)
10371 bnx2x_set_led(bp, port, LED_MODE_OPER,
10372 bp->link_vars.line_speed,
10373 bp->link_params.hw_led_mode,
10374 bp->link_params.chip_id);
10375
10376 return 0;
10377}
10378
10379static struct ethtool_ops bnx2x_ethtool_ops = {
10380 .get_settings = bnx2x_get_settings,
10381 .set_settings = bnx2x_set_settings,
10382 .get_drvinfo = bnx2x_get_drvinfo,
10383 .get_regs_len = bnx2x_get_regs_len,
10384 .get_regs = bnx2x_get_regs,
10385 .get_wol = bnx2x_get_wol,
10386 .set_wol = bnx2x_set_wol,
10387 .get_msglevel = bnx2x_get_msglevel,
10388 .set_msglevel = bnx2x_set_msglevel,
10389 .nway_reset = bnx2x_nway_reset,
10390 .get_link = bnx2x_get_link,
10391 .get_eeprom_len = bnx2x_get_eeprom_len,
10392 .get_eeprom = bnx2x_get_eeprom,
10393 .set_eeprom = bnx2x_set_eeprom,
10394 .get_coalesce = bnx2x_get_coalesce,
10395 .set_coalesce = bnx2x_set_coalesce,
10396 .get_ringparam = bnx2x_get_ringparam,
10397 .set_ringparam = bnx2x_set_ringparam,
10398 .get_pauseparam = bnx2x_get_pauseparam,
10399 .set_pauseparam = bnx2x_set_pauseparam,
10400 .get_rx_csum = bnx2x_get_rx_csum,
10401 .set_rx_csum = bnx2x_set_rx_csum,
10402 .get_tx_csum = ethtool_op_get_tx_csum,
10403 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10404 .set_flags = bnx2x_set_flags,
10405 .get_flags = ethtool_op_get_flags,
10406 .get_sg = ethtool_op_get_sg,
10407 .set_sg = ethtool_op_set_sg,
10408 .get_tso = ethtool_op_get_tso,
10409 .set_tso = bnx2x_set_tso,
10410 .self_test_count = bnx2x_self_test_count,
10411 .self_test = bnx2x_self_test,
10412 .get_strings = bnx2x_get_strings,
10413 .phys_id = bnx2x_phys_id,
10414 .get_stats_count = bnx2x_get_stats_count,
10415 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10416};
10417
10418/* end of ethtool_ops */
10419
10420/****************************************************************************
10421* General service functions
10422****************************************************************************/
10423
10424static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10425{
10426 u16 pmcsr;
10427
10428 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10429
10430 switch (state) {
10431 case PCI_D0:
10432 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10433 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10434 PCI_PM_CTRL_PME_STATUS));
10435
10436 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10437 /* delay required during transition out of D3hot */
10438 msleep(20);
10439 break;
10440
10441 case PCI_D3hot:
10442 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10443 pmcsr |= 3;
10444
10445 if (bp->wol)
10446 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10447
10448 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10449 pmcsr);
10450
10451 /* No more memory access after this point until
10452 * device is brought back to D0.
10453 */
10454 break;
10455
10456 default:
10457 return -EINVAL;
10458 }
10459 return 0;
10460}
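/* Annotation (illustrative, not part of the original driver): the two
 * low bits of PMCSR select the power state, which is why "pmcsr |= 3"
 * above requests D3hot, while clearing PCI_PM_CTRL_STATE_MASK (and
 * writing PME status) returns the function to D0.  The 20 ms sleep
 * covers the recovery time required after leaving D3hot.
 */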
10461
10462static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10463{
10464 u16 rx_cons_sb;
10465
10466 /* Tell compiler that status block fields can change */
10467 barrier();
10468 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10469 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10470 rx_cons_sb++;
10471 return (fp->rx_comp_cons != rx_cons_sb);
10472}
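/* Annotation (illustrative, not part of the original driver): the last
 * element of each RCQ page is a link to the next page rather than a
 * real completion, so a status-block consumer index that lands on such
 * a slot (rx_cons_sb & MAX_RCQ_DESC_CNT == MAX_RCQ_DESC_CNT) is bumped
 * by one before being compared with the driver's rx_comp_cons.
 */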
10473
10474/*
10475 * net_device service functions
10476 */
10477
10478static int bnx2x_poll(struct napi_struct *napi, int budget)
10479{
10480 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10481 napi);
10482 struct bnx2x *bp = fp->bp;
10483 int work_done = 0;
10484
10485#ifdef BNX2X_STOP_ON_ERROR
10486 if (unlikely(bp->panic))
10487 goto poll_panic;
10488#endif
10489
10490 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10491 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10492
10493 bnx2x_update_fpsb_idx(fp);
10494
10495 if (bnx2x_has_rx_work(fp)) {
10496 work_done = bnx2x_rx_int(fp, budget);
10497
10498 /* must not complete if we consumed full budget */
10499 if (work_done >= budget)
10500 goto poll_again;
10501 }
10502
10503 /* bnx2x_has_rx_work() reads the status block, thus we need to
10504 * ensure that status block indices have been actually read
10505 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
10506 * so that we won't write the "newer" value of the status block to IGU
10507 * (if there was a DMA right after bnx2x_has_rx_work and
10508 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10509 * may be postponed to right before bnx2x_ack_sb). In this case
10510 * there will never be another interrupt until there is another update
10511 * of the status block, while there is still unhandled work.
10512 */
10513 rmb();
10514
10515 if (!bnx2x_has_rx_work(fp)) {
10516#ifdef BNX2X_STOP_ON_ERROR
10517poll_panic:
10518#endif
10519 napi_complete(napi);
10520
10521 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10522 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10523 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10524 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10525 }
10526
10527poll_again:
10528 return work_done;
10529}
10530
10531
10532/* we split the first BD into headers and data BDs
10533 * to ease the pain of our fellow microcode engineers
10534 * we use one mapping for both BDs
10535 * So far this has only been observed to happen
10536 * in Other Operating Systems(TM)
10537 */
10538static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10539 struct bnx2x_fastpath *fp,
10540 struct sw_tx_bd *tx_buf,
10541 struct eth_tx_start_bd **tx_bd, u16 hlen,
10542 u16 bd_prod, int nbd)
10543{
10544 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10545 struct eth_tx_bd *d_tx_bd;
10546 dma_addr_t mapping;
10547 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10548
10549 /* first fix first BD */
10550 h_tx_bd->nbd = cpu_to_le16(nbd);
10551 h_tx_bd->nbytes = cpu_to_le16(hlen);
10552
10553 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10554 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10555 h_tx_bd->addr_lo, h_tx_bd->nbd);
10556
10557 /* now get a new data BD
10558 * (after the pbd) and fill it */
10559 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10560 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10561
10562 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10563 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10564
10565 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10566 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10567 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10568
10569 /* this marks the BD as one that has no individual mapping */
10570 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10571
10572 DP(NETIF_MSG_TX_QUEUED,
10573 "TSO split data size is %d (%x:%x)\n",
10574 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10575
10576 /* update tx_bd */
10577 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
10578
10579 return bd_prod;
10580}
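/* Annotation (illustrative, not part of the original driver): after the
 * split above, the start BD covers only the hlen header bytes and the
 * new data BD covers the remaining old_len - hlen bytes at
 * mapping + hlen, i.e. both BDs share one DMA mapping;
 * BNX2X_TSO_SPLIT_BD tells the completion path not to unmap the data
 * BD separately.
 */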
10581
10582static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10583{
10584 if (fix > 0)
10585 csum = (u16) ~csum_fold(csum_sub(csum,
10586 csum_partial(t_header - fix, fix, 0)));
10587
10588 else if (fix < 0)
10589 csum = (u16) ~csum_fold(csum_add(csum,
10590 csum_partial(t_header, -fix, 0)));
10591
10592 return swab16(csum);
10593}
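/* Annotation (illustrative, not part of the original driver): 'fix' is
 * the signed distance between the offset the stack checksummed from and
 * the transport header.  For fix > 0 the partial sum of the extra bytes
 * before the header is subtracted out; for fix < 0 the missing bytes
 * are added back.  The result is byte-swapped because the parsing BD
 * expects the pseudo checksum in big-endian order.
 */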
10594
10595static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10596{
10597 u32 rc;
10598
10599 if (skb->ip_summed != CHECKSUM_PARTIAL)
10600 rc = XMIT_PLAIN;
10601
10602 else {
10603 if (skb->protocol == htons(ETH_P_IPV6)) {
10604 rc = XMIT_CSUM_V6;
10605 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10606 rc |= XMIT_CSUM_TCP;
10607
10608 } else {
10609 rc = XMIT_CSUM_V4;
10610 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10611 rc |= XMIT_CSUM_TCP;
10612 }
10613 }
10614
10615 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10616 rc |= XMIT_GSO_V4;
10617
10618 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10619 rc |= XMIT_GSO_V6;
10620
10621 return rc;
10622}
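/* Annotation (illustrative, not part of the original driver): the mask
 * simply ORs checksum and GSO properties; e.g. a CHECKSUM_PARTIAL IPv4
 * TCP frame with TSO yields XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4,
 * while a frame without a hardware checksum request is XMIT_PLAIN.
 */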
10623
10624#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10625/* check if packet requires linearization (packet is too fragmented)
10626 no need to check fragmentation if page size > 8K (there will be no
10627 violation to FW restrictions) */
10628static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10629 u32 xmit_type)
10630{
10631 int to_copy = 0;
10632 int hlen = 0;
10633 int first_bd_sz = 0;
10634
10635 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10636 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10637
10638 if (xmit_type & XMIT_GSO) {
10639 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10640 /* Check if LSO packet needs to be copied:
10641 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10642 int wnd_size = MAX_FETCH_BD - 3;
10643 /* Number of windows to check */
10644 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10645 int wnd_idx = 0;
10646 int frag_idx = 0;
10647 u32 wnd_sum = 0;
10648
10649 /* Headers length */
10650 hlen = (int)(skb_transport_header(skb) - skb->data) +
10651 tcp_hdrlen(skb);
10652
10653 /* Amount of data (w/o headers) on linear part of SKB*/
10654 first_bd_sz = skb_headlen(skb) - hlen;
10655
10656 wnd_sum = first_bd_sz;
10657
10658 /* Calculate the first sum - it's special */
10659 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10660 wnd_sum +=
10661 skb_shinfo(skb)->frags[frag_idx].size;
10662
10663 /* If there was data on linear skb data - check it */
10664 if (first_bd_sz > 0) {
10665 if (unlikely(wnd_sum < lso_mss)) {
10666 to_copy = 1;
10667 goto exit_lbl;
10668 }
10669
10670 wnd_sum -= first_bd_sz;
10671 }
10672
10673 /* Others are easier: run through the frag list and
10674 check all windows */
10675 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10676 wnd_sum +=
10677 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10678
10679 if (unlikely(wnd_sum < lso_mss)) {
10680 to_copy = 1;
10681 break;
10682 }
10683 wnd_sum -=
10684 skb_shinfo(skb)->frags[wnd_idx].size;
10685 }
10686 } else {
10687 /* a non-LSO packet that is too fragmented
10688 must always be linearized */
10689 to_copy = 1;
10690 }
10691 }
10692
10693exit_lbl:
10694 if (unlikely(to_copy))
10695 DP(NETIF_MSG_TX_QUEUED,
10696 "Linearization IS REQUIRED for %s packet. "
10697 "num_frags %d hlen %d first_bd_sz %d\n",
10698 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10699 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10700
10701 return to_copy;
10702}
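/* Annotation (illustrative, not part of the original driver): the
 * sliding-window scan above enforces a FW rule that every window of
 * MAX_FETCH_BD - 3 consecutive BDs must carry at least one MSS of
 * payload.  As a sketch with a hypothetical window of 3 and an MSS of
 * 1000, frags of 400 + 300 + 200 bytes sum to 900 < 1000, so to_copy
 * is set and the packet is linearized.
 */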
10703#endif
10704
10705/* called with netif_tx_lock
10706 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10707 * netif_wake_queue()
10708 */
10709static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10710{
10711 struct bnx2x *bp = netdev_priv(dev);
10712 struct bnx2x_fastpath *fp, *fp_stat;
10713 struct netdev_queue *txq;
10714 struct sw_tx_bd *tx_buf;
10715 struct eth_tx_start_bd *tx_start_bd;
10716 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
10717 struct eth_tx_parse_bd *pbd = NULL;
10718 u16 pkt_prod, bd_prod;
10719 int nbd, fp_index;
10720 dma_addr_t mapping;
10721 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10722 int i;
10723 u8 hlen = 0;
10724 __le16 pkt_size = 0;
10725
10726#ifdef BNX2X_STOP_ON_ERROR
10727 if (unlikely(bp->panic))
10728 return NETDEV_TX_BUSY;
10729#endif
10730
10731 fp_index = skb_get_queue_mapping(skb);
10732 txq = netdev_get_tx_queue(dev, fp_index);
10733
10734 fp = &bp->fp[fp_index + bp->num_rx_queues];
10735 fp_stat = &bp->fp[fp_index];
10736
10737 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10738 fp_stat->eth_q_stats.driver_xoff++;
10739 netif_tx_stop_queue(txq);
10740 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10741 return NETDEV_TX_BUSY;
10742 }
10743
10744 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10745 " gso type %x xmit_type %x\n",
10746 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10747 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10748
10749#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10750 /* First, check if we need to linearize the skb (due to FW
10751 restrictions). No need to check fragmentation if page size > 8K
10752 (there will be no violation to FW restrictions) */
10753 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10754 /* Statistics of linearization */
10755 bp->lin_cnt++;
10756 if (skb_linearize(skb) != 0) {
10757 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10758 "silently dropping this SKB\n");
10759 dev_kfree_skb_any(skb);
10760 return NETDEV_TX_OK;
10761 }
10762 }
10763#endif
10764
10765 /*
10766 Please read carefully. First we use one BD which we mark as start,
10767 then we have a parsing info BD (used for TSO or xsum),
10768 and only then we have the rest of the TSO BDs.
10769 (don't forget to mark the last one as last,
10770 and to unmap only AFTER you write to the BD ...)
10771 And above all, all PBD sizes are in words - NOT DWORDS!
10772 */
10773
10774 pkt_prod = fp->tx_pkt_prod++;
10775 bd_prod = TX_BD(fp->tx_bd_prod);
10776
10777 /* get a tx_buf and first BD */
10778 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10779 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
10780
10781 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10782 tx_start_bd->general_data = (UNICAST_ADDRESS <<
10783 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
10784 /* header nbd */
10785 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
10786
10787 /* remember the first BD of the packet */
10788 tx_buf->first_bd = fp->tx_bd_prod;
10789 tx_buf->skb = skb;
10790 tx_buf->flags = 0;
10791
10792 DP(NETIF_MSG_TX_QUEUED,
10793 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10794 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
10795
10796#ifdef BCM_VLAN
10797 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10798 (bp->flags & HW_VLAN_TX_FLAG)) {
10799 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10800 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10801 } else
10802#endif
10803 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10804
10805 /* turn on parsing and get a BD */
10806 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10807 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
10808
10809 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10810
10811 if (xmit_type & XMIT_CSUM) {
10812 hlen = (skb_network_header(skb) - skb->data) / 2;
10813
10814 /* for now NS flag is not used in Linux */
10815 pbd->global_data =
10816 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10817 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10818
10819 pbd->ip_hlen = (skb_transport_header(skb) -
10820 skb_network_header(skb)) / 2;
10821
10822 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10823
10824 pbd->total_hlen = cpu_to_le16(hlen);
10825 hlen = hlen*2;
10826
10827 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
10828
10829 if (xmit_type & XMIT_CSUM_V4)
10830 tx_start_bd->bd_flags.as_bitfield |=
10831 ETH_TX_BD_FLAGS_IP_CSUM;
10832 else
10833 tx_start_bd->bd_flags.as_bitfield |=
10834 ETH_TX_BD_FLAGS_IPV6;
10835
10836 if (xmit_type & XMIT_CSUM_TCP) {
10837 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10838
10839 } else {
10840 s8 fix = SKB_CS_OFF(skb); /* signed! */
10841
10842 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
10843
10844 DP(NETIF_MSG_TX_QUEUED,
10845 "hlen %d fix %d csum before fix %x\n",
10846 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
10847
10848 /* HW bug: fixup the CSUM */
10849 pbd->tcp_pseudo_csum =
10850 bnx2x_csum_fix(skb_transport_header(skb),
10851 SKB_CS(skb), fix);
10852
10853 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10854 pbd->tcp_pseudo_csum);
10855 }
10856 }
10857
10858 mapping = pci_map_single(bp->pdev, skb->data,
10859 skb_headlen(skb), PCI_DMA_TODEVICE);
10860
10861 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10862 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10863 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
10864 tx_start_bd->nbd = cpu_to_le16(nbd);
10865 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10866 pkt_size = tx_start_bd->nbytes;
10867
10868 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10869 " nbytes %d flags %x vlan %x\n",
10870 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
10871 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
10872 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
10873
10874 if (xmit_type & XMIT_GSO) {
10875
10876 DP(NETIF_MSG_TX_QUEUED,
10877 "TSO packet len %d hlen %d total len %d tso size %d\n",
10878 skb->len, hlen, skb_headlen(skb),
10879 skb_shinfo(skb)->gso_size);
10880
10881 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10882
10883 if (unlikely(skb_headlen(skb) > hlen))
10884 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
10885 hlen, bd_prod, ++nbd);
10886
10887 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10888 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10889 pbd->tcp_flags = pbd_tcp_flags(skb);
10890
10891 if (xmit_type & XMIT_GSO_V4) {
10892 pbd->ip_id = swab16(ip_hdr(skb)->id);
10893 pbd->tcp_pseudo_csum =
10894 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10895 ip_hdr(skb)->daddr,
10896 0, IPPROTO_TCP, 0));
10897
10898 } else
10899 pbd->tcp_pseudo_csum =
10900 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10901 &ipv6_hdr(skb)->daddr,
10902 0, IPPROTO_TCP, 0));
10903
10904 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10905 }
10906 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
10907
10908 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10909 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 10910
10911 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10912 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10913 if (total_pkt_bd == NULL)
10914 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10915
10916 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10917 frag->size, PCI_DMA_TODEVICE);
10918
10919 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10920 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10921 tx_data_bd->nbytes = cpu_to_le16(frag->size);
10922 le16_add_cpu(&pkt_size, frag->size);
10923
10924 DP(NETIF_MSG_TX_QUEUED,
10925 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
10926 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
10927 le16_to_cpu(tx_data_bd->nbytes));
10928 }
10929
10930 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
10931
10932 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10933
10934 /* now send a tx doorbell, counting the next BD
10935 * if the packet contains or ends with it
10936 */
10937 if (TX_BD_POFF(bd_prod) < nbd)
10938 nbd++;
10939
10940 if (total_pkt_bd != NULL)
10941 total_pkt_bd->total_pkt_bytes = pkt_size;
10942
10943 if (pbd)
10944 DP(NETIF_MSG_TX_QUEUED,
10945 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10946 " tcp_flags %x xsum %x seq %u hlen %u\n",
10947 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10948 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10949 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10950
10951 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10952
10953 /*
10954 * Make sure that the BD data is updated before updating the producer
10955 * since FW might read the BD right after the producer is updated.
10956 * This is only applicable for weak-ordered memory model archs such
10957 * as IA-64. The following barrier is also mandatory since the FW
10958 * assumes packets must have BDs.
10959 */
10960 wmb();
10961
10962 fp->tx_db.data.prod += nbd;
10963 barrier();
10964 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
10965
10966 mmiowb();
10967
10968 fp->tx_bd_prod += nbd;
10969
10970 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10971 netif_tx_stop_queue(txq);
10972 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10973 if we put Tx into XOFF state. */
10974 smp_mb();
10975 fp_stat->eth_q_stats.driver_xoff++;
10976 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10977 netif_tx_wake_queue(txq);
10978 }
10979 fp_stat->tx_pkt++;
10980
10981 return NETDEV_TX_OK;
10982}
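/* Annotation (illustrative, not part of the original driver): the xmit
 * path above is the usual producer/consumer pattern - write the BDs,
 * wmb(), publish the producer via the doorbell, mmiowb().  Stopping
 * the queue when fewer than MAX_SKB_FRAGS + 3 BDs remain and
 * re-checking availability before waking it avoids a lost-wakeup race
 * against bnx2x_tx_int() running on another CPU.
 */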
10983
10984/* called with rtnl_lock */
10985static int bnx2x_open(struct net_device *dev)
10986{
10987 struct bnx2x *bp = netdev_priv(dev);
10988
10989 netif_carrier_off(dev);
10990
10991 bnx2x_set_power_state(bp, PCI_D0);
10992
10993 return bnx2x_nic_load(bp, LOAD_OPEN);
10994}
10995
10996/* called with rtnl_lock */
10997static int bnx2x_close(struct net_device *dev)
10998{
10999 struct bnx2x *bp = netdev_priv(dev);
11000
11001 /* Unload the driver, release IRQs */
11002 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11003 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11004 if (!CHIP_REV_IS_SLOW(bp))
11005 bnx2x_set_power_state(bp, PCI_D3hot);
11006
11007 return 0;
11008}
11009
11010/* called with netif_tx_lock from dev_mcast.c */
11011static void bnx2x_set_rx_mode(struct net_device *dev)
11012{
11013 struct bnx2x *bp = netdev_priv(dev);
11014 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11015 int port = BP_PORT(bp);
11016
11017 if (bp->state != BNX2X_STATE_OPEN) {
11018 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11019 return;
11020 }
11021
11022 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11023
11024 if (dev->flags & IFF_PROMISC)
11025 rx_mode = BNX2X_RX_MODE_PROMISC;
11026
11027 else if ((dev->flags & IFF_ALLMULTI) ||
11028 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11029 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11030
11031 else { /* some multicasts */
11032 if (CHIP_IS_E1(bp)) {
11033 int i, old, offset;
11034 struct dev_mc_list *mclist;
11035 struct mac_configuration_cmd *config =
11036 bnx2x_sp(bp, mcast_config);
11037
11038 for (i = 0, mclist = dev->mc_list;
11039 mclist && (i < dev->mc_count);
11040 i++, mclist = mclist->next) {
11041
11042 config->config_table[i].
11043 cam_entry.msb_mac_addr =
11044 swab16(*(u16 *)&mclist->dmi_addr[0]);
11045 config->config_table[i].
11046 cam_entry.middle_mac_addr =
11047 swab16(*(u16 *)&mclist->dmi_addr[2]);
11048 config->config_table[i].
11049 cam_entry.lsb_mac_addr =
11050 swab16(*(u16 *)&mclist->dmi_addr[4]);
11051 config->config_table[i].cam_entry.flags =
11052 cpu_to_le16(port);
11053 config->config_table[i].
11054 target_table_entry.flags = 0;
11055 config->config_table[i].target_table_entry.
11056 clients_bit_vector =
11057 cpu_to_le32(1 << BP_L_ID(bp));
11058 config->config_table[i].
11059 target_table_entry.vlan_id = 0;
11060
11061 DP(NETIF_MSG_IFUP,
11062 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11063 config->config_table[i].
11064 cam_entry.msb_mac_addr,
11065 config->config_table[i].
11066 cam_entry.middle_mac_addr,
11067 config->config_table[i].
11068 cam_entry.lsb_mac_addr);
11069 }
11070 old = config->hdr.length;
11071 if (old > i) {
11072 for (; i < old; i++) {
11073 if (CAM_IS_INVALID(config->
11074 config_table[i])) {
11075 /* already invalidated */
11076 break;
11077 }
11078 /* invalidate */
11079 CAM_INVALIDATE(config->
11080 config_table[i]);
11081 }
11082 }
11083
11084 if (CHIP_REV_IS_SLOW(bp))
11085 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11086 else
11087 offset = BNX2X_MAX_MULTICAST*(1 + port);
11088
11089 config->hdr.length = i;
11090 config->hdr.offset = offset;
11091 config->hdr.client_id = bp->fp->cl_id;
11092 config->hdr.reserved1 = 0;
11093
11094 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11095 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11096 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11097 0);
11098 } else { /* E1H */
11099 /* Accept one or more multicasts */
11100 struct dev_mc_list *mclist;
11101 u32 mc_filter[MC_HASH_SIZE];
11102 u32 crc, bit, regidx;
11103 int i;
11104
11105 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11106
11107 for (i = 0, mclist = dev->mc_list;
11108 mclist && (i < dev->mc_count);
11109 i++, mclist = mclist->next) {
11110
11111 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11112 mclist->dmi_addr);
11113
11114 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11115 bit = (crc >> 24) & 0xff;
11116 regidx = bit >> 5;
11117 bit &= 0x1f;
11118 mc_filter[regidx] |= (1 << bit);
11119 }
11120
11121 for (i = 0; i < MC_HASH_SIZE; i++)
11122 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11123 mc_filter[i]);
11124 }
11125 }
11126
11127 bp->rx_mode = rx_mode;
11128 bnx2x_set_storm_rx_mode(bp);
11129}
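/* Annotation (illustrative, not part of the original driver): the E1H
 * multicast filter above is a 256-bit hash spread over MC_HASH_SIZE
 * 32-bit registers.  Worked example: if crc32c_le(0, mac, ETH_ALEN)
 * has top byte 0x9c (156), then regidx = 156 >> 5 = 4 and
 * bit = 156 & 0x1f = 28, so bit 28 of mc_filter[4] is set.
 */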
11130
11131/* called with rtnl_lock */
a2fbb9ea
ET
11132static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11133{
11134 struct sockaddr *addr = p;
11135 struct bnx2x *bp = netdev_priv(dev);
11136
11137 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11138 return -EINVAL;
11139
11140 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11141 if (netif_running(dev)) {
11142 if (CHIP_IS_E1(bp))
11143 bnx2x_set_mac_addr_e1(bp, 1);
11144 else
11145 bnx2x_set_mac_addr_e1h(bp, 1);
11146 }
11147
11148 return 0;
11149}
11150
11151/* called with rtnl_lock */
11152static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11153{
11154 struct mii_ioctl_data *data = if_mii(ifr);
11155 struct bnx2x *bp = netdev_priv(dev);
11156 int port = BP_PORT(bp);
11157 int err;
11158
11159 switch (cmd) {
11160 case SIOCGMIIPHY:
11161 data->phy_id = bp->port.phy_addr;
11162
11163 /* fallthrough */
11164
11165 case SIOCGMIIREG: {
11166 u16 mii_regval;
11167
11168 if (!netif_running(dev))
11169 return -EAGAIN;
11170
11171 mutex_lock(&bp->port.phy_mutex);
11172 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
11173 DEFAULT_PHY_DEV_ADDR,
11174 (data->reg_num & 0x1f), &mii_regval);
11175 data->val_out = mii_regval;
11176 mutex_unlock(&bp->port.phy_mutex);
11177 return err;
11178 }
11179
11180 case SIOCSMIIREG:
11181 if (!capable(CAP_NET_ADMIN))
11182 return -EPERM;
11183
11184 if (!netif_running(dev))
11185 return -EAGAIN;
11186
11187 mutex_lock(&bp->port.phy_mutex);
11188 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
11189 DEFAULT_PHY_DEV_ADDR,
11190 (data->reg_num & 0x1f), data->val_in);
11191 mutex_unlock(&bp->port.phy_mutex);
11192 return err;
11193
11194 default:
11195 /* do nothing */
11196 break;
11197 }
11198
11199 return -EOPNOTSUPP;
11200}
11201
11202/* called with rtnl_lock */
11203static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11204{
11205 struct bnx2x *bp = netdev_priv(dev);
11206 int rc = 0;
11207
11208 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11209 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11210 return -EINVAL;
11211
11212 /* This does not race with packet allocation
11213 * because the actual alloc size is
11214 * only updated as part of load
11215 */
11216 dev->mtu = new_mtu;
11217
11218 if (netif_running(dev)) {
11219 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11220 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11221 }
11222
11223 return rc;
11224}
11225
11226static void bnx2x_tx_timeout(struct net_device *dev)
11227{
11228 struct bnx2x *bp = netdev_priv(dev);
11229
11230#ifdef BNX2X_STOP_ON_ERROR
11231 if (!bp->panic)
11232 bnx2x_panic();
11233#endif
11234 /* This allows the netif to be shutdown gracefully before resetting */
11235 schedule_work(&bp->reset_task);
11236}
11237
11238#ifdef BCM_VLAN
11239/* called with rtnl_lock */
11240static void bnx2x_vlan_rx_register(struct net_device *dev,
11241 struct vlan_group *vlgrp)
11242{
11243 struct bnx2x *bp = netdev_priv(dev);
11244
11245 bp->vlgrp = vlgrp;
11246
11247 /* Set flags according to the required capabilities */
11248 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11249
11250 if (dev->features & NETIF_F_HW_VLAN_TX)
11251 bp->flags |= HW_VLAN_TX_FLAG;
11252
11253 if (dev->features & NETIF_F_HW_VLAN_RX)
11254 bp->flags |= HW_VLAN_RX_FLAG;
11255
11256 if (netif_running(dev))
11257 bnx2x_set_client_config(bp);
11258}
11259
11260#endif
11261
11262#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11263static void poll_bnx2x(struct net_device *dev)
11264{
11265 struct bnx2x *bp = netdev_priv(dev);
11266
11267 disable_irq(bp->pdev->irq);
11268 bnx2x_interrupt(bp->pdev->irq, dev);
11269 enable_irq(bp->pdev->irq);
11270}
11271#endif
11272
11273static const struct net_device_ops bnx2x_netdev_ops = {
11274 .ndo_open = bnx2x_open,
11275 .ndo_stop = bnx2x_close,
11276 .ndo_start_xmit = bnx2x_start_xmit,
11277 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11278 .ndo_set_mac_address = bnx2x_change_mac_addr,
11279 .ndo_validate_addr = eth_validate_addr,
11280 .ndo_do_ioctl = bnx2x_ioctl,
11281 .ndo_change_mtu = bnx2x_change_mtu,
11282 .ndo_tx_timeout = bnx2x_tx_timeout,
11283#ifdef BCM_VLAN
11284 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11285#endif
11286#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11287 .ndo_poll_controller = poll_bnx2x,
11288#endif
11289};
11290
11291static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11292 struct net_device *dev)
11293{
11294 struct bnx2x *bp;
11295 int rc;
11296
11297 SET_NETDEV_DEV(dev, &pdev->dev);
11298 bp = netdev_priv(dev);
11299
11300 bp->dev = dev;
11301 bp->pdev = pdev;
11302 bp->flags = 0;
11303 bp->func = PCI_FUNC(pdev->devfn);
11304
11305 rc = pci_enable_device(pdev);
11306 if (rc) {
11307 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11308 goto err_out;
11309 }
11310
11311 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11312 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11313 " aborting\n");
11314 rc = -ENODEV;
11315 goto err_out_disable;
11316 }
11317
11318 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11319 printk(KERN_ERR PFX "Cannot find second PCI device"
11320 " base address, aborting\n");
11321 rc = -ENODEV;
11322 goto err_out_disable;
11323 }
11324
11325 if (atomic_read(&pdev->enable_cnt) == 1) {
11326 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11327 if (rc) {
11328 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11329 " aborting\n");
11330 goto err_out_disable;
11331 }
11332
11333 pci_set_master(pdev);
11334 pci_save_state(pdev);
11335 }
11336
11337 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11338 if (bp->pm_cap == 0) {
11339 printk(KERN_ERR PFX "Cannot find power management"
11340 " capability, aborting\n");
11341 rc = -EIO;
11342 goto err_out_release;
11343 }
11344
11345 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11346 if (bp->pcie_cap == 0) {
11347 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11348 " aborting\n");
11349 rc = -EIO;
11350 goto err_out_release;
11351 }
11352
11353 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11354 bp->flags |= USING_DAC_FLAG;
11355 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11356 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11357 " failed, aborting\n");
11358 rc = -EIO;
11359 goto err_out_release;
11360 }
11361
11362 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11363 printk(KERN_ERR PFX "System does not support DMA,"
11364 " aborting\n");
11365 rc = -EIO;
11366 goto err_out_release;
11367 }
11368
11369 dev->mem_start = pci_resource_start(pdev, 0);
11370 dev->base_addr = dev->mem_start;
11371 dev->mem_end = pci_resource_end(pdev, 0);
11372
11373 dev->irq = pdev->irq;
11374
11375 bp->regview = pci_ioremap_bar(pdev, 0);
11376 if (!bp->regview) {
11377 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11378 rc = -ENOMEM;
11379 goto err_out_release;
11380 }
11381
11382 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11383 min_t(u64, BNX2X_DB_SIZE,
11384 pci_resource_len(pdev, 2)));
11385 if (!bp->doorbells) {
11386 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11387 rc = -ENOMEM;
11388 goto err_out_unmap;
11389 }
11390
11391 bnx2x_set_power_state(bp, PCI_D0);
11392
11393 /* clean indirect addresses */
11394 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11395 PCICFG_VENDOR_ID_OFFSET);
11396 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11397 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11398 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11399 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11400
11401 dev->watchdog_timeo = TX_TIMEOUT;
11402
11403 dev->netdev_ops = &bnx2x_netdev_ops;
11404 dev->ethtool_ops = &bnx2x_ethtool_ops;
11405 dev->features |= NETIF_F_SG;
11406 dev->features |= NETIF_F_HW_CSUM;
11407 if (bp->flags & USING_DAC_FLAG)
11408 dev->features |= NETIF_F_HIGHDMA;
11409 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11410 dev->features |= NETIF_F_TSO6;
11411#ifdef BCM_VLAN
11412 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11413 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11414
11415 dev->vlan_features |= NETIF_F_SG;
11416 dev->vlan_features |= NETIF_F_HW_CSUM;
11417 if (bp->flags & USING_DAC_FLAG)
11418 dev->vlan_features |= NETIF_F_HIGHDMA;
11419 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11420 dev->vlan_features |= NETIF_F_TSO6;
11421#endif
11422
11423 return 0;
11424
11425err_out_unmap:
11426 if (bp->regview) {
11427 iounmap(bp->regview);
11428 bp->regview = NULL;
11429 }
11430 if (bp->doorbells) {
11431 iounmap(bp->doorbells);
11432 bp->doorbells = NULL;
11433 }
11434
11435err_out_release:
11436 if (atomic_read(&pdev->enable_cnt) == 1)
11437 pci_release_regions(pdev);
11438
11439err_out_disable:
11440 pci_disable_device(pdev);
11441 pci_set_drvdata(pdev, NULL);
11442
11443err_out:
11444 return rc;
11445}
11446
11447static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11448{
11449 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11450
11451 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11452 return val;
11453}
11454
11455/* return value of 1=2.5GHz 2=5GHz */
11456static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11457{
11458 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11459
11460 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11461 return val;
11462}
11463static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11464{
11465 struct bnx2x_fw_file_hdr *fw_hdr;
11466 struct bnx2x_fw_file_section *sections;
11467 u16 *ops_offsets;
11468 u32 offset, len, num_ops;
11469 int i;
11470 const struct firmware *firmware = bp->firmware;
11471 const u8 *fw_ver;
11472
11473 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11474 return -EINVAL;
11475
11476 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11477 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11478
11479 /* Make sure none of the offsets and sizes make us read beyond
11480 * the end of the firmware data */
11481 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11482 offset = be32_to_cpu(sections[i].offset);
11483 len = be32_to_cpu(sections[i].len);
11484 if (offset + len > firmware->size) {
11485 printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11486 return -EINVAL;
11487 }
11488 }
11489
11490 /* Likewise for the init_ops offsets */
11491 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11492 ops_offsets = (u16 *)(firmware->data + offset);
11493 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11494
11495 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11496 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11497 printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11498 return -EINVAL;
11499 }
11500 }
11501
11502 /* Check FW version */
11503 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11504 fw_ver = firmware->data + offset;
11505 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11506 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11507 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11508 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11509 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11510 " Should be %d.%d.%d.%d\n",
11511 fw_ver[0], fw_ver[1], fw_ver[2],
11512 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11513 BCM_5710_FW_MINOR_VERSION,
11514 BCM_5710_FW_REVISION_VERSION,
11515 BCM_5710_FW_ENGINEERING_VERSION);
11516 return -EINVAL;
11517 }
11518
11519 return 0;
11520}
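/* Annotation (illustrative, not part of the original driver): the
 * validation above treats the file header as an array of {offset, len}
 * sections and rejects any section with offset + len beyond
 * firmware->size, any init_ops offset past num_ops, and any version
 * mismatch, so truncated or mismatched firmware files are refused
 * before anything is parsed.
 */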
11521
11522static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11523{
11524 u32 i;
11525 const __be32 *source = (const __be32*)_source;
11526 u32 *target = (u32*)_target;
11527
11528 for (i = 0; i < n/4; i++)
11529 target[i] = be32_to_cpu(source[i]);
11530}
11531
11532/*
11533 Ops array is stored in the following format:
11534 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11535 */
11536static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11537{
11538 u32 i, j, tmp;
11539 const __be32 *source = (const __be32*)_source;
11540 struct raw_op *target = (struct raw_op*)_target;
11541
11542 for (i = 0, j = 0; i < n/8; i++, j+=2) {
11543 tmp = be32_to_cpu(source[j]);
11544 target[i].op = (tmp >> 24) & 0xff;
11545 target[i].offset = tmp & 0xffffff;
11546 target[i].raw_data = be32_to_cpu(source[j+1]);
11547 }
11548}
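/* Annotation (illustrative, not part of the original driver): each
 * 8-byte record unpacks as described above; e.g. the big-endian words
 * 0x05000010 0x00000001 decode to op = 0x05, offset = 0x000010 and
 * raw_data = 0x00000001.
 */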
11549static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11550{
11551 u32 i;
11552 u16 *target = (u16*)_target;
11553 const __be16 *source = (const __be16*)_source;
11554
11555 for (i = 0; i < n/2; i++)
11556 target[i] = be16_to_cpu(source[i]);
11557}
11558
11559#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11560 do { \
11561 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11562 bp->arr = kmalloc(len, GFP_KERNEL); \
11563 if (!bp->arr) { \
11564 printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11565 goto lbl; \
11566 } \
11567 func(bp->firmware->data + \
11568 be32_to_cpu(fw_hdr->arr.offset), \
11569 (u8*)bp->arr, len); \
11570 } while (0)
11571
11572
11573static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11574{
11575 char fw_file_name[40] = {0};
11576 int rc, offset;
11577 struct bnx2x_fw_file_hdr *fw_hdr;
11578
11579 /* Create a FW file name */
11580 if (CHIP_IS_E1(bp))
11581 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11582 else
11583 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11584
11585 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11586 BCM_5710_FW_MAJOR_VERSION,
11587 BCM_5710_FW_MINOR_VERSION,
11588 BCM_5710_FW_REVISION_VERSION,
11589 BCM_5710_FW_ENGINEERING_VERSION);
11590
11591 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11592
11593 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11594 if (rc) {
11595 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11596 goto request_firmware_exit;
11597 }
11598
11599 rc = bnx2x_check_firmware(bp);
11600 if (rc) {
11601 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11602 goto request_firmware_exit;
11603 }
11604
11605 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11606
11607 /* Initialize the pointers to the init arrays */
11608 /* Blob */
11609 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11610
11611 /* Opcodes */
11612 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11613
11614 /* Offsets */
11615 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11616
11617 /* STORMs firmware */
11618 bp->tsem_int_table_data = bp->firmware->data +
11619 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11620 bp->tsem_pram_data = bp->firmware->data +
11621 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11622 bp->usem_int_table_data = bp->firmware->data +
11623 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11624 bp->usem_pram_data = bp->firmware->data +
11625 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11626 bp->xsem_int_table_data = bp->firmware->data +
11627 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11628 bp->xsem_pram_data = bp->firmware->data +
11629 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11630 bp->csem_int_table_data = bp->firmware->data +
11631 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11632 bp->csem_pram_data = bp->firmware->data +
11633 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11634
11635 return 0;
11636init_offsets_alloc_err:
11637 kfree(bp->init_ops);
11638init_ops_alloc_err:
11639 kfree(bp->init_data);
11640request_firmware_exit:
11641 release_firmware(bp->firmware);
11642
11643 return rc;
11644}
11645
11646
11647
11648static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11649 const struct pci_device_id *ent)
11650{
11651 static int version_printed;
11652 struct net_device *dev = NULL;
11653 struct bnx2x *bp;
11654 int rc;
11655
11656 if (version_printed++ == 0)
11657 printk(KERN_INFO "%s", version);
11658
11659 /* dev zeroed in init_etherdev */
11660 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11661 if (!dev) {
11662 printk(KERN_ERR PFX "Cannot allocate net device\n");
11663 return -ENOMEM;
11664 }
11665
11666 bp = netdev_priv(dev);
11667 bp->msglevel = debug;
11668
11669 rc = bnx2x_init_dev(pdev, dev);
11670 if (rc < 0) {
11671 free_netdev(dev);
11672 return rc;
11673 }
11674
11675 pci_set_drvdata(pdev, dev);
11676
11677 rc = bnx2x_init_bp(bp);
11678 if (rc)
11679 goto init_one_exit;
11680
11681 /* Set init arrays */
11682 rc = bnx2x_init_firmware(bp, &pdev->dev);
11683 if (rc) {
11684 printk(KERN_ERR PFX "Error loading firmware\n");
11685 goto init_one_exit;
11686 }
11687
11688 rc = register_netdev(dev);
11689 if (rc) {
11690 dev_err(&pdev->dev, "Cannot register net device\n");
11691 goto init_one_exit;
11692 }
11693
11694 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11695 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11696 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11697 bnx2x_get_pcie_width(bp),
11698 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11699 dev->base_addr, bp->pdev->irq);
11700 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11701
11702 return 0;
11703
11704init_one_exit:
11705 if (bp->regview)
11706 iounmap(bp->regview);
11707
11708 if (bp->doorbells)
11709 iounmap(bp->doorbells);
11710
11711 free_netdev(dev);
11712
11713 if (atomic_read(&pdev->enable_cnt) == 1)
11714 pci_release_regions(pdev);
11715
11716 pci_disable_device(pdev);
11717 pci_set_drvdata(pdev, NULL);
11718
11719 return rc;
11720}
11721
11722static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11723{
11724 struct net_device *dev = pci_get_drvdata(pdev);
11725 struct bnx2x *bp;
11726
11727 if (!dev) {
11728 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11729 return;
11730 }
11731 bp = netdev_priv(dev);
11732
11733 unregister_netdev(dev);
11734
11735 kfree(bp->init_ops_offsets);
11736 kfree(bp->init_ops);
11737 kfree(bp->init_data);
11738 release_firmware(bp->firmware);
11739
11740 if (bp->regview)
11741 iounmap(bp->regview);
11742
11743 if (bp->doorbells)
11744 iounmap(bp->doorbells);
11745
11746 free_netdev(dev);
11747
11748 if (atomic_read(&pdev->enable_cnt) == 1)
11749 pci_release_regions(pdev);
11750
11751 pci_disable_device(pdev);
11752 pci_set_drvdata(pdev, NULL);
11753}
11754
11755static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11756{
11757 struct net_device *dev = pci_get_drvdata(pdev);
11758 struct bnx2x *bp;
11759
11760 if (!dev) {
11761 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11762 return -ENODEV;
11763 }
11764 bp = netdev_priv(dev);
11765
11766 rtnl_lock();
11767
11768 pci_save_state(pdev);
11769
11770 if (!netif_running(dev)) {
11771 rtnl_unlock();
11772 return 0;
11773 }
11774
11775 netif_device_detach(dev);
11776
11777 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11778
11779 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11780
11781 rtnl_unlock();
11782
11783 return 0;
11784}
11785
11786static int bnx2x_resume(struct pci_dev *pdev)
11787{
11788 struct net_device *dev = pci_get_drvdata(pdev);
11789 struct bnx2x *bp;
11790 int rc;
11791
11792 if (!dev) {
11793 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11794 return -ENODEV;
11795 }
11796 bp = netdev_priv(dev);
11797
11798 rtnl_lock();
11799
11800 pci_restore_state(pdev);
11801
11802 if (!netif_running(dev)) {
11803 rtnl_unlock();
11804 return 0;
11805 }
11806
11807 bnx2x_set_power_state(bp, PCI_D0);
11808 netif_device_attach(dev);
11809
11810 rc = bnx2x_nic_load(bp, LOAD_OPEN);
11811
11812 rtnl_unlock();
11813
11814 return rc;
11815}
11816
11817static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11818{
11819 int i;
11820
11821 bp->state = BNX2X_STATE_ERROR;
11822
11823 bp->rx_mode = BNX2X_RX_MODE_NONE;
11824
11825 bnx2x_netif_stop(bp, 0);
11826
11827 del_timer_sync(&bp->timer);
11828 bp->stats_state = STATS_STATE_DISABLED;
11829 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11830
11831 /* Release IRQs */
11832 bnx2x_free_irq(bp);
11833
11834 if (CHIP_IS_E1(bp)) {
11835 struct mac_configuration_cmd *config =
11836 bnx2x_sp(bp, mcast_config);
11837
8d9c5f34 11838 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
11839 CAM_INVALIDATE(config->config_table[i]);
11840 }
11841
11842 /* Free SKBs, SGEs, TPA pool and driver internals */
11843 bnx2x_free_skbs(bp);
555f6c78 11844 for_each_rx_queue(bp, i)
f8ef6e44 11845 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 11846 for_each_rx_queue(bp, i)
7cde1c8b 11847 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
11848 bnx2x_free_mem(bp);
11849
11850 bp->state = BNX2X_STATE_CLOSED;
11851
11852 netif_carrier_off(bp->dev);
11853
11854 return 0;
11855}
11856
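/*
 * Unlike bnx2x_nic_unload(), the error-path unload above skips the
 * ramrod/MCP shutdown sequence and only invalidates the E1 CAM table
 * in host memory before freeing SKBs, SGE ranges, NAPI contexts and
 * driver memory -- the assumption being that chip state is lost anyway
 * after the bus error.
 */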
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

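/*
 * The 0xA0000-0xC0000 window test above is a sanity check that the
 * shared-memory pointer read from MISC_REG_SHARED_MEM_ADDR lands in
 * the region where the MCP scratchpad is presumably expected; anything
 * else is treated as "no management firmware" and NO_MCP_FLAG makes
 * the driver run without MCP arbitration from then on.
 */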
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

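/*
 * pci_channel_io_perm_failure short-circuits recovery: returning
 * PCI_ERS_RESULT_DISCONNECT tells the PCI core the device is gone for
 * good, whereas PCI_ERS_RESULT_NEED_RESET asks it to proceed with a
 * slot reset and invoke bnx2x_io_slot_reset() next.
 */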
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

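/*
 * On a recoverable error the PCI error-recovery core therefore walks
 * the callbacks in order:
 *
 *   bnx2x_io_error_detected() -> PCI_ERS_RESULT_NEED_RESET
 *   bnx2x_io_slot_reset()     -> PCI_ERS_RESULT_RECOVERED
 *   bnx2x_io_resume()         -> reload the NIC and reattach the netdev
 */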
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

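/*
 * The workqueue is created before pci_register_driver() so that it
 * already exists by the time any probed device can schedule slow-path
 * or reset work on it; if registration fails, the workqueue is
 * destroyed again before the error is returned.
 */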
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

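/*
 * Teardown order matters here as well: unregistering the PCI driver
 * removes all devices first, so no device can still have work pending
 * by the time the workqueue is destroyed.
 */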
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);