/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.114-1"
#define DRV_MODULE_RELDATE	"2009/07/29"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

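/*
 * The two helpers below implement indirect register access through PCI
 * config space: writing the target GRC address to PCICFG_GRC_ADDRESS
 * opens a window, PCICFG_GRC_DATA then reads or writes through that
 * window, and the window is finally pointed back at
 * PCICFG_VENDOR_ID_OFFSET -- presumably so that stray config cycles
 * cannot reach an arbitrary device register.
 */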
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

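/*
 * DMAE write path, as sketched by the function below: build a
 * dmae_command describing a PCI->GRC copy, post it with
 * bnx2x_post_dmae(), then poll the write-back completion word until
 * the engine stores DMAE_COMP_VAL there (up to ~200 iterations of
 * 5us, or 100ms each on slow emulation/FPGA parts).  Before the
 * engine is initialized (!bp->dmae_ready) the copy falls back to
 * slow indirect register writes.
 */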
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

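/*
 * The read path mirrors bnx2x_write_dmae(): same completion polling,
 * but with SRC_GRC/DST_PCI so the engine copies device registers into
 * the slowpath wb_data buffer (or reads them one by one through
 * bnx2x_reg_rd_ind() while DMAE is not ready yet).
 */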
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

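/*
 * Each of the four storm processors (X/T/C/U) keeps a list of firmware
 * asserts in its internal memory.  The function below walks each list
 * until it hits an entry whose first word is still
 * COMMON_ASM_INVALID_ASSERT_OPCODE, and returns how many asserts it
 * printed, so a non-zero result means the firmware has complained.
 */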
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

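/*
 * The MCP keeps a textual log in its scratchpad; the "mark" word at
 * offset 0xf104 appears to point into that circular buffer, so the
 * dump below prints from the mark to the end of the buffer and then
 * from the start of the buffer back up to the mark, 8 words (plus a
 * forced NUL terminator) at a time.
 */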
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

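/*
 * Crash-dump helper: disables statistics, then prints the driver's
 * view of every Rx/Tx queue (indices first, then a window of the
 * actual rings around the consumers) and finishes with the firmware
 * log and the storm assert lists above.
 */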
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

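/*
 * Interrupt enable: the HC_CONFIG bits select between MSI-X, MSI and
 * INTx in the function below.  Note that the INTx case writes the
 * register twice -- once with the MSI/MSI-X enable bit set and once
 * with it cleared; the reason is not stated here, so this looks like
 * an ordering requirement of the HC rather than a documented API.
 */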
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

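/*
 * Status-block ACK: the IGU is acknowledged by composing an
 * igu_ack_register (status block id, storm, new index, interrupt
 * enable/disable op) and writing it as a single 32-bit word to the
 * per-port INT_ACK command register.
 */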
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

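/*
 * Ring-occupancy arithmetic used below: prod - cons counts occupied
 * descriptors, and the NUM_TX_RINGS "next page" entries are added on
 * top since they can never carry packets; the value returned is
 * therefore a conservative count of free BDs.
 */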
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

1055
1056static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1057 struct bnx2x_fastpath *fp, int last)
1058{
1059 int i;
1060
1061 for (i = 0; i < last; i++)
1062 bnx2x_free_rx_sge(bp, fp, i);
1063}
1064
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

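/*
 * SGE mask bookkeeping (helpers below): fp->sge_mask is a bitmask with
 * one bit per SGE ring entry, where a set bit means "free for reuse".
 * Bits are cleared as the firmware consumes pages and set again, one
 * 64-entry mask element at a time, when the producer is advanced past
 * them; the two page-end "next" entries of every ring page are kept
 * cleared since they never carry buffers.
 */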
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

1152
1153static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1154{
1155 int i, j;
1156
1157 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1158 int idx = RX_SGE_CNT * i - 1;
1159
1160 for (j = 0; j < 2; j++) {
1161 SGE_MASK_CLEAR_BIT(fp, idx);
1162 idx--;
1163 }
1164 }
1165}
1166
1167static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1168 struct eth_fast_path_rx_cqe *fp_cqe)
1169{
1170 struct bnx2x *bp = fp->bp;
4f40f2cb 1171 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
7a9b2557 1172 le16_to_cpu(fp_cqe->len_on_bd)) >>
4f40f2cb 1173 SGE_PAGE_SHIFT;
7a9b2557
VZ
1174 u16 last_max, last_elem, first_elem;
1175 u16 delta = 0;
1176 u16 i;
1177
1178 if (!sge_len)
1179 return;
1180
1181 /* First mark all used pages */
1182 for (i = 0; i < sge_len; i++)
1183 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1184
1185 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1186 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1187
1188 /* Here we assume that the last SGE index is the biggest */
1189 prefetch((void *)(fp->sge_mask));
1190 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1191
1192 last_max = RX_SGE(fp->last_max_sge);
1193 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1194 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1195
1196 /* If ring is not full */
1197 if (last_elem + 1 != first_elem)
1198 last_elem++;
1199
1200 /* Now update the prod */
1201 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1202 if (likely(fp->sge_mask[i]))
1203 break;
1204
1205 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1206 delta += RX_SGE_MASK_ELEM_SZ;
1207 }
1208
1209 if (delta > 0) {
1210 fp->rx_sge_prod += delta;
1211 /* clear page-end entries */
1212 bnx2x_clear_sge_mask_next_elems(fp);
1213 }
1214
1215 DP(NETIF_MSG_RX_STATUS,
1216 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1217 fp->last_max_sge, fp->rx_sge_prod);
1218}
1219
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

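/*
 * TPA (LRO) bins: tpa_pool[queue] holds the skb currently being
 * aggregated for that bin.  tpa_start() below parks the partially
 * filled skb in the pool and puts an empty one on the ring in its
 * place; bnx2x_tpa_stop() later completes the aggregated skb and
 * hands it to the stack.
 */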
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

1337
1338static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1339 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1340 u16 cqe_idx)
1341{
1342 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1343 struct sk_buff *skb = rx_buf->skb;
1344 /* alloc new skb */
1345 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1346
1347 /* Unmap skb in the pool anyway, as we are going to change
1348 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1349 fails. */
1350 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
437cf2f1 1351 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557 1352
7a9b2557 1353 if (likely(new_skb)) {
66e855f3
YG
1354 /* fix ip xsum and give it to the stack */
1355 /* (no need to map the new skb) */
0c6671b0
EG
1356#ifdef BCM_VLAN
1357 int is_vlan_cqe =
1358 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1359 PARSING_FLAGS_VLAN);
1360 int is_not_hwaccel_vlan_cqe =
1361 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1362#endif
7a9b2557
VZ
1363
1364 prefetch(skb);
1365 prefetch(((char *)(skb)) + 128);
1366
7a9b2557
VZ
1367#ifdef BNX2X_STOP_ON_ERROR
1368 if (pad + len > bp->rx_buf_size) {
1369 BNX2X_ERR("skb_put is about to fail... "
1370 "pad %d len %d rx_buf_size %d\n",
1371 pad, len, bp->rx_buf_size);
1372 bnx2x_panic();
1373 return;
1374 }
1375#endif
1376
1377 skb_reserve(skb, pad);
1378 skb_put(skb, len);
1379
1380 skb->protocol = eth_type_trans(skb, bp->dev);
1381 skb->ip_summed = CHECKSUM_UNNECESSARY;
1382
1383 {
1384 struct iphdr *iph;
1385
1386 iph = (struct iphdr *)skb->data;
0c6671b0
EG
1387#ifdef BCM_VLAN
1388 /* If there is no Rx VLAN offloading -
1389 take VLAN tag into an account */
1390 if (unlikely(is_not_hwaccel_vlan_cqe))
1391 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1392#endif
7a9b2557
VZ
1393 iph->check = 0;
1394 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1395 }
1396
1397 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1398 &cqe->fast_path_cqe, cqe_idx)) {
1399#ifdef BCM_VLAN
0c6671b0
EG
1400 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1401 (!is_not_hwaccel_vlan_cqe))
7a9b2557
VZ
1402 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1403 le16_to_cpu(cqe->fast_path_cqe.
1404 vlan_tag));
1405 else
1406#endif
1407 netif_receive_skb(skb);
1408 } else {
1409 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1410 " - dropping packet!\n");
1411 dev_kfree_skb(skb);
1412 }
1413
7a9b2557
VZ
1414
1415 /* put new skb in bin */
1416 fp->tpa_pool[queue].skb = new_skb;
1417
1418 } else {
66e855f3 1419 /* else drop the packet and keep the buffer in the bin */
7a9b2557
VZ
1420 DP(NETIF_MSG_RX_STATUS,
1421 "Failed to allocate new skb - dropping packet!\n");
de832a55 1422 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1423 }
1424
1425 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1426}
1427
1428static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1429 struct bnx2x_fastpath *fp,
1430 u16 bd_prod, u16 rx_comp_prod,
1431 u16 rx_sge_prod)
1432{
8d9c5f34 1433 struct ustorm_eth_rx_producers rx_prods = {0};
7a9b2557
VZ
1434 int i;
1435
1436 /* Update producers */
1437 rx_prods.bd_prod = bd_prod;
1438 rx_prods.cqe_prod = rx_comp_prod;
1439 rx_prods.sge_prod = rx_sge_prod;
1440
58f4c4cf
EG
1441 /*
1442 * Make sure that the BD and SGE data is updated before updating the
1443 * producers since FW might read the BD/SGE right after the producer
1444 * is updated.
1445 * This is only applicable for weak-ordered memory model archs such
1446 * as IA-64. The following barrier is also mandatory since FW will
1447 * assumes BDs must have buffers.
1448 */
1449 wmb();
1450
8d9c5f34
EG
1451 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1452 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 1453 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
7a9b2557
VZ
1454 ((u32 *)&rx_prods)[i]);
1455
58f4c4cf
EG
1456 mmiowb(); /* keep prod updates ordered */
1457
7a9b2557 1458 DP(NETIF_MSG_RX_STATUS,
555f6c78
EG
1459 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1460 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
7a9b2557
VZ
1461}
1462
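/*
 * Rx completion loop: walk the completion queue from sw_comp_cons up
 * to the index reported in the status block.  Each CQE is either a
 * slowpath (ramrod) event, a TPA start/end marker, or a plain packet;
 * plain packets are copied out when small (there is no jumbo ring),
 * otherwise the ring buffer is swapped with a freshly allocated
 * replacement and the old skb goes up the stack.
 */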
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

1685
1686static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1687{
1688 struct bnx2x_fastpath *fp = fp_cookie;
1689 struct bnx2x *bp = fp->bp;
0626b899 1690 int index = fp->index;
a2fbb9ea 1691
da5a662a
VZ
1692 /* Return here if interrupt is disabled */
1693 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1694 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1695 return IRQ_HANDLED;
1696 }
1697
34f80b04 1698 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
0626b899
EG
1699 index, fp->sb_id);
1700 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1701
1702#ifdef BNX2X_STOP_ON_ERROR
1703 if (unlikely(bp->panic))
1704 return IRQ_HANDLED;
1705#endif
1706
1707 prefetch(fp->rx_cons_sb);
1708 prefetch(fp->tx_cons_sb);
1709 prefetch(&fp->status_blk->c_status_block.status_block_index);
1710 prefetch(&fp->status_blk->u_status_block.status_block_index);
1711
288379f0 1712 napi_schedule(&bnx2x_fp(bp, index, napi));
34f80b04 1713
a2fbb9ea
ET
1714 return IRQ_HANDLED;
1715}
1716
1717static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1718{
555f6c78 1719 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1720 u16 status = bnx2x_ack_int(bp);
34f80b04 1721 u16 mask;
a2fbb9ea 1722
34f80b04 1723 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1724 if (unlikely(status == 0)) {
1725 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1726 return IRQ_NONE;
1727 }
f5372251 1728 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1729
34f80b04 1730 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1731 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1732 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1733 return IRQ_HANDLED;
1734 }
1735
3196a88a
EG
1736#ifdef BNX2X_STOP_ON_ERROR
1737 if (unlikely(bp->panic))
1738 return IRQ_HANDLED;
1739#endif
1740
34f80b04
EG
1741 mask = 0x2 << bp->fp[0].sb_id;
1742 if (status & mask) {
a2fbb9ea
ET
1743 struct bnx2x_fastpath *fp = &bp->fp[0];
1744
1745 prefetch(fp->rx_cons_sb);
1746 prefetch(fp->tx_cons_sb);
1747 prefetch(&fp->status_blk->c_status_block.status_block_index);
1748 prefetch(&fp->status_blk->u_status_block.status_block_index);
1749
288379f0 1750 napi_schedule(&bnx2x_fp(bp, 0, napi));
a2fbb9ea 1751
34f80b04 1752 status &= ~mask;
a2fbb9ea
ET
1753 }
1754
a2fbb9ea 1755
34f80b04 1756 if (unlikely(status & 0x1)) {
1cf167f2 1757 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1758
1759 status &= ~0x1;
1760 if (!status)
1761 return IRQ_HANDLED;
1762 }
1763
34f80b04
EG
1764 if (status)
1765 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1766 status);
a2fbb9ea 1767
c18487ee 1768 return IRQ_HANDLED;
a2fbb9ea
ET
1769}
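/* A minimal standalone sketch of the INTA dispatch above: every
 * fastpath status block owns one bit in the acked status word
 * (0x2 << sb_id) and bit 0 belongs to the slowpath; the handler peels
 * off the bits it recognizes and reports whatever is left over.
 * Values here are invented for illustration. */
#include <stdio.h>

int main(void)
{
	unsigned int status = 0x5;		/* slowpath + fastpath sb_id 1 */
	unsigned int sb_id = 1;
	unsigned int mask = 0x2 << sb_id;	/* 0x4 */

	if (status & mask) {
		printf("schedule NAPI for sb_id %u\n", sb_id);
		status &= ~mask;
	}
	if (status & 0x1) {
		printf("queue slowpath task\n");
		status &= ~0x1;
	}
	if (status)
		printf("unknown interrupt bits 0x%x\n", status);
	return 0;
}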
1770
c18487ee 1771/* end of fast path */
a2fbb9ea 1772
bb2a0f7a 1773static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1774
c18487ee
YR
1775/* Link */
1776
1777/*
1778 * General service functions
1779 */
a2fbb9ea 1780
4a37fb66 1781static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1782{
1783 u32 lock_status;
1784 u32 resource_bit = (1 << resource);
4a37fb66
YG
1785 int func = BP_FUNC(bp);
1786 u32 hw_lock_control_reg;
c18487ee 1787 int cnt;
a2fbb9ea 1788
c18487ee
YR
1789 /* Validating that the resource is within range */
1790 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1791 DP(NETIF_MSG_HW,
1792 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1793 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1794 return -EINVAL;
1795 }
a2fbb9ea 1796
4a37fb66
YG
1797 if (func <= 5) {
1798 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1799 } else {
1800 hw_lock_control_reg =
1801 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1802 }
1803
c18487ee 1804 /* Validating that the resource is not already taken */
4a37fb66 1805 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1806 if (lock_status & resource_bit) {
1807 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1808 lock_status, resource_bit);
1809 return -EEXIST;
1810 }
a2fbb9ea 1811
46230476
EG
 1812 /* Try for 5 seconds, retrying every 5ms */
1813 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1814 /* Try to acquire the lock */
4a37fb66
YG
1815 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1816 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1817 if (lock_status & resource_bit)
1818 return 0;
a2fbb9ea 1819
c18487ee 1820 msleep(5);
a2fbb9ea 1821 }
c18487ee
YR
1822 DP(NETIF_MSG_HW, "Timeout\n");
1823 return -EAGAIN;
1824}
a2fbb9ea 1825
4a37fb66 1826static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1827{
1828 u32 lock_status;
1829 u32 resource_bit = (1 << resource);
4a37fb66
YG
1830 int func = BP_FUNC(bp);
1831 u32 hw_lock_control_reg;
a2fbb9ea 1832
c18487ee
YR
1833 /* Validating that the resource is within range */
1834 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1835 DP(NETIF_MSG_HW,
1836 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1837 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1838 return -EINVAL;
1839 }
1840
4a37fb66
YG
1841 if (func <= 5) {
1842 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1843 } else {
1844 hw_lock_control_reg =
1845 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1846 }
1847
c18487ee 1848 /* Validating that the resource is currently taken */
4a37fb66 1849 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1850 if (!(lock_status & resource_bit)) {
1851 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1852 lock_status, resource_bit);
1853 return -EFAULT;
a2fbb9ea
ET
1854 }
1855
4a37fb66 1856 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1857 return 0;
1858}
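/* A self-contained model of the acquire/release pattern above: writing
 * the resource bit to the lock register's "set" side claims it, and a
 * read-back confirms ownership; on contention the driver retries every
 * 5ms for roughly 5 seconds. reg_read()/reg_set()/reg_clear() are
 * invented stand-ins for REG_RD/REG_WR, not driver APIs. */
#include <stdio.h>
#include <unistd.h>

static unsigned int lock_reg;	/* models the HW lock control register */

static unsigned int reg_read(void)      { return lock_reg; }
static void reg_set(unsigned int bit)   { lock_reg |= bit; }
static void reg_clear(unsigned int bit) { lock_reg &= ~bit; }

static int acquire(unsigned int resource)
{
	unsigned int bit = 1u << resource;
	int cnt;

	if (reg_read() & bit)		/* already taken: -EEXIST above */
		return -1;
	for (cnt = 0; cnt < 1000; cnt++) {
		reg_set(bit);		/* try to claim the resource */
		if (reg_read() & bit)	/* read back: did we get it? */
			return 0;
		usleep(5000);
	}
	return -1;			/* timeout: -EAGAIN above */
}

int main(void)
{
	if (acquire(3) == 0) {
		printf("resource 3 held\n");
		reg_clear(1u << 3);	/* release, as above */
	}
	return 0;
}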
1859
1860/* HW Lock for shared dual port PHYs */
4a37fb66 1861static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1862{
34f80b04 1863 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1864
46c6a674
EG
1865 if (bp->port.need_hw_lock)
1866 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1867}
a2fbb9ea 1868
4a37fb66 1869static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1870{
46c6a674
EG
1871 if (bp->port.need_hw_lock)
1872 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1873
34f80b04 1874 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1875}
a2fbb9ea 1876
4acac6a5
EG
1877int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1878{
1879 /* The GPIO should be swapped if swap register is set and active */
1880 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1881 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1882 int gpio_shift = gpio_num +
1883 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1884 u32 gpio_mask = (1 << gpio_shift);
1885 u32 gpio_reg;
1886 int value;
1887
1888 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1889 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1890 return -EINVAL;
1891 }
1892
1893 /* read GPIO value */
1894 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1895
1896 /* get the requested pin value */
1897 if ((gpio_reg & gpio_mask) == gpio_mask)
1898 value = 1;
1899 else
1900 value = 0;
1901
1902 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1903
1904 return value;
1905}
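/* A worked example of the pin addressing above: the effective port is
 * XORed when both the port-swap strap and its override are set, and the
 * pin index is then offset by a per-port shift. The shift value and the
 * register readout below are illustrative, not the chip's real layout. */
#include <stdio.h>

#define GPIO_PORT_SHIFT 4	/* assumed stand-in for MISC_REGISTERS_GPIO_PORT_SHIFT */

int main(void)
{
	int port = 1, swap = 1, swap_override = 1;
	int gpio_num = 2;
	int gpio_port = (swap && swap_override) ^ port;	/* swap active: 1 -> 0 */
	int gpio_shift = gpio_num + (gpio_port ? GPIO_PORT_SHIFT : 0);
	unsigned int gpio_mask = 1u << gpio_shift;
	unsigned int gpio_reg = 0x0004;	/* pretend value read from MISC_REG_GPIO */

	printf("pin %d -> shift %d, value %d\n", gpio_num, gpio_shift,
	       (gpio_reg & gpio_mask) == gpio_mask);
	return 0;
}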
1906
17de50b7 1907int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1908{
1909 /* The GPIO should be swapped if swap register is set and active */
1910 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1911 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1912 int gpio_shift = gpio_num +
1913 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1914 u32 gpio_mask = (1 << gpio_shift);
1915 u32 gpio_reg;
a2fbb9ea 1916
c18487ee
YR
1917 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1918 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1919 return -EINVAL;
1920 }
a2fbb9ea 1921
4a37fb66 1922 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1923 /* read GPIO and mask except the float bits */
1924 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1925
c18487ee
YR
1926 switch (mode) {
1927 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1928 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1929 gpio_num, gpio_shift);
1930 /* clear FLOAT and set CLR */
1931 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1932 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1933 break;
a2fbb9ea 1934
c18487ee
YR
1935 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1936 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1937 gpio_num, gpio_shift);
1938 /* clear FLOAT and set SET */
1939 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1940 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1941 break;
a2fbb9ea 1942
17de50b7 1943 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1944 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1945 gpio_num, gpio_shift);
1946 /* set FLOAT */
1947 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1948 break;
a2fbb9ea 1949
c18487ee
YR
1950 default:
1951 break;
a2fbb9ea
ET
1952 }
1953
c18487ee 1954 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1955 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1956
c18487ee 1957 return 0;
a2fbb9ea
ET
1958}
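/* A sketch of the mode update above: one register carries SET, CLR and
 * FLOAT bit-fields per pin, so "output low" means clearing the pin's
 * FLOAT bit and raising its CLR bit. The field positions are invented
 * for illustration, not the real MISC_REGISTERS_GPIO_*_POS values. */
#include <stdio.h>

#define SET_POS   0	/* assumed field offsets */
#define CLR_POS   8
#define FLOAT_POS 16

int main(void)
{
	unsigned int mask = 1u << 3;		/* pin 3 */
	unsigned int reg = mask << FLOAT_POS;	/* pin starts floating */

	reg &= ~(mask << FLOAT_POS);	/* clear FLOAT */
	reg |= (mask << CLR_POS);	/* set CLR -> drive low */
	printf("register after 'output low': 0x%06x\n", reg);
	return 0;
}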
1959
4acac6a5
EG
1960int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1961{
1962 /* The GPIO should be swapped if swap register is set and active */
1963 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1964 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1965 int gpio_shift = gpio_num +
1966 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1967 u32 gpio_mask = (1 << gpio_shift);
1968 u32 gpio_reg;
1969
1970 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1971 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1972 return -EINVAL;
1973 }
1974
1975 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1976 /* read GPIO int */
1977 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1978
1979 switch (mode) {
1980 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1981 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1982 "output low\n", gpio_num, gpio_shift);
1983 /* clear SET and set CLR */
1984 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1985 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1986 break;
1987
1988 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1989 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1990 "output high\n", gpio_num, gpio_shift);
1991 /* clear CLR and set SET */
1992 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1993 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1994 break;
1995
1996 default:
1997 break;
1998 }
1999
2000 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2001 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2002
2003 return 0;
2004}
2005
c18487ee 2006static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2007{
c18487ee
YR
2008 u32 spio_mask = (1 << spio_num);
2009 u32 spio_reg;
a2fbb9ea 2010
c18487ee
YR
2011 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2012 (spio_num > MISC_REGISTERS_SPIO_7)) {
2013 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2014 return -EINVAL;
a2fbb9ea
ET
2015 }
2016
4a37fb66 2017 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2018 /* read SPIO and mask except the float bits */
2019 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2020
c18487ee 2021 switch (mode) {
6378c025 2022 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2023 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2024 /* clear FLOAT and set CLR */
2025 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2026 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2027 break;
a2fbb9ea 2028
6378c025 2029 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2030 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2031 /* clear FLOAT and set SET */
2032 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2033 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2034 break;
a2fbb9ea 2035
c18487ee
YR
2036 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2037 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2038 /* set FLOAT */
2039 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2040 break;
a2fbb9ea 2041
c18487ee
YR
2042 default:
2043 break;
a2fbb9ea
ET
2044 }
2045
c18487ee 2046 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2047 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2048
a2fbb9ea
ET
2049 return 0;
2050}
2051
c18487ee 2052static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2053{
ad33ea3a
EG
2054 switch (bp->link_vars.ieee_fc &
2055 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2056 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2057 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2058 ADVERTISED_Pause);
2059 break;
356e2385 2060
c18487ee 2061 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2062 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2063 ADVERTISED_Pause);
2064 break;
356e2385 2065
c18487ee 2066 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2067 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2068 break;
356e2385 2069
c18487ee 2070 default:
34f80b04 2071 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2072 ADVERTISED_Pause);
2073 break;
2074 }
2075}
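/* A minimal table of the mapping implemented above: the negotiated
 * ieee_fc pause mode decides which Pause/Asym_Pause bits end up in the
 * advertisement word. Flag values here are illustrative, not ethtool's. */
#include <stdio.h>

enum { ADV_PAUSE = 1 << 0, ADV_ASYM_PAUSE = 1 << 1 };
enum { FC_NONE, FC_BOTH, FC_ASYM };

static unsigned int fc_to_adv(int fc, unsigned int adv)
{
	switch (fc) {
	case FC_BOTH:	return adv | ADV_PAUSE | ADV_ASYM_PAUSE;
	case FC_ASYM:	return adv | ADV_ASYM_PAUSE;
	case FC_NONE:
	default:	return adv & ~(ADV_PAUSE | ADV_ASYM_PAUSE);
	}
}

int main(void)
{
	printf("both 0x%x  asym 0x%x  none 0x%x\n",
	       fc_to_adv(FC_BOTH, 0), fc_to_adv(FC_ASYM, 0),
	       fc_to_adv(FC_NONE, ADV_PAUSE | ADV_ASYM_PAUSE));
	return 0;
}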
f1410647 2076
c18487ee
YR
2077static void bnx2x_link_report(struct bnx2x *bp)
2078{
2079 if (bp->link_vars.link_up) {
2080 if (bp->state == BNX2X_STATE_OPEN)
2081 netif_carrier_on(bp->dev);
2082 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2083
c18487ee 2084 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2085
c18487ee
YR
2086 if (bp->link_vars.duplex == DUPLEX_FULL)
2087 printk("full duplex");
2088 else
2089 printk("half duplex");
f1410647 2090
c0700f90
DM
2091 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2092 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2093 printk(", receive ");
356e2385
EG
2094 if (bp->link_vars.flow_ctrl &
2095 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2096 printk("& transmit ");
2097 } else {
2098 printk(", transmit ");
2099 }
2100 printk("flow control ON");
2101 }
2102 printk("\n");
f1410647 2103
c18487ee
YR
2104 } else { /* link_down */
2105 netif_carrier_off(bp->dev);
2106 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2107 }
c18487ee
YR
2108}
2109
b5bf9068 2110static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2111{
19680c48
EG
2112 if (!BP_NOMCP(bp)) {
2113 u8 rc;
a2fbb9ea 2114
19680c48 2115 /* Initialize link parameters structure variables */
8c99e7b0
YR
2116 /* It is recommended to turn off RX FC for jumbo frames
2117 for better performance */
2118 if (IS_E1HMF(bp))
c0700f90 2119 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2120 else if (bp->dev->mtu > 5000)
c0700f90 2121 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2122 else
c0700f90 2123 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2124
4a37fb66 2125 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2126
2127 if (load_mode == LOAD_DIAG)
2128 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2129
19680c48 2130 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2131
4a37fb66 2132 bnx2x_release_phy_lock(bp);
a2fbb9ea 2133
3c96c68b
EG
2134 bnx2x_calc_fc_adv(bp);
2135
b5bf9068
EG
2136 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2137 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2138 bnx2x_link_report(bp);
b5bf9068 2139 }
34f80b04 2140
19680c48
EG
2141 return rc;
2142 }
f5372251 2143 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
19680c48 2144 return -EINVAL;
a2fbb9ea
ET
2145}
2146
c18487ee 2147static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2148{
19680c48 2149 if (!BP_NOMCP(bp)) {
4a37fb66 2150 bnx2x_acquire_phy_lock(bp);
19680c48 2151 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2152 bnx2x_release_phy_lock(bp);
a2fbb9ea 2153
19680c48
EG
2154 bnx2x_calc_fc_adv(bp);
2155 } else
f5372251 2156 BNX2X_ERR("Bootcode is missing - cannot set link\n");
c18487ee 2157}
a2fbb9ea 2158
c18487ee
YR
2159static void bnx2x__link_reset(struct bnx2x *bp)
2160{
19680c48 2161 if (!BP_NOMCP(bp)) {
4a37fb66 2162 bnx2x_acquire_phy_lock(bp);
589abe3a 2163 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2164 bnx2x_release_phy_lock(bp);
19680c48 2165 } else
f5372251 2166 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
c18487ee 2167}
a2fbb9ea 2168
c18487ee
YR
2169static u8 bnx2x_link_test(struct bnx2x *bp)
2170{
2171 u8 rc;
a2fbb9ea 2172
4a37fb66 2173 bnx2x_acquire_phy_lock(bp);
c18487ee 2174 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2175 bnx2x_release_phy_lock(bp);
a2fbb9ea 2176
c18487ee
YR
2177 return rc;
2178}
a2fbb9ea 2179
8a1c38d1 2180static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2181{
8a1c38d1
EG
2182 u32 r_param = bp->link_vars.line_speed / 8;
2183 u32 fair_periodic_timeout_usec;
2184 u32 t_fair;
34f80b04 2185
8a1c38d1
EG
2186 memset(&(bp->cmng.rs_vars), 0,
2187 sizeof(struct rate_shaping_vars_per_port));
2188 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2189
8a1c38d1
EG
2190 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2191 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2192
8a1c38d1
EG
 2193 /* this is the threshold below which no timer arming will occur.
 2194 The 1.25 coefficient makes the threshold a little bigger
 2195 than the real time, to compensate for timer inaccuracy */
2196 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2197 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2198
8a1c38d1
EG
2199 /* resolution of fairness timer */
2200 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2201 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2202 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2203
8a1c38d1
EG
2204 /* this is the threshold below which we won't arm the timer anymore */
2205 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2206
8a1c38d1
EG
 2207 /* we multiply by 1e3/8 to get bytes/msec.
 2208 We don't want the credits to exceed a credit
 2209 of t_fair*FAIR_MEM (the algorithm resolution) */
2210 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2211 /* since each tick is 4 usec */
2212 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2213}
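/* Worked numbers for the timer setup above on an assumed 10000 Mbps
 * link. The three constants are stand-ins chosen to match the comments
 * (t_fair of 1000us at 10G), not the driver's actual #defines. */
#include <stdio.h>

#define RS_PERIODIC_TIMEOUT_USEC 100		/* assumed */
#define QM_ARB_BYTES             40000		/* assumed */
#define T_FAIR_COEF              10000000	/* assumed: 1000us at 10G */

int main(void)
{
	unsigned int line_speed = 10000;	/* Mbps */
	unsigned int r_param = line_speed / 8;	/* bytes per usec */

	printf("rs_periodic_timeout = %u ticks\n",
	       RS_PERIODIC_TIMEOUT_USEC / 4);		/* 4 usec per SDM tick */
	printf("rs_threshold       = %u\n",
	       RS_PERIODIC_TIMEOUT_USEC * r_param * 5 / 4); /* 1.25x margin */
	printf("fair timeout       = %u usec\n", QM_ARB_BYTES / r_param);
	printf("t_fair             = %u\n", T_FAIR_COEF / line_speed);
	return 0;
}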
2214
8a1c38d1 2215static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2216{
2217 struct rate_shaping_vars_per_vn m_rs_vn;
2218 struct fairness_vars_per_vn m_fair_vn;
2219 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2220 u16 vn_min_rate, vn_max_rate;
2221 int i;
2222
 2223 /* If the function is hidden - set min and max to zero */
2224 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2225 vn_min_rate = 0;
2226 vn_max_rate = 0;
2227
2228 } else {
2229 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2230 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2231 /* If fairness is enabled (not all min rates are zero) and
34f80b04 2232 the current min rate is zero - set it to 1.
33471629 2233 This is a requirement of the algorithm. */
8a1c38d1 2234 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2235 vn_min_rate = DEF_MIN_RATE;
2236 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2237 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2238 }
2239
8a1c38d1
EG
2240 DP(NETIF_MSG_IFUP,
2241 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2242 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2243
2244 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2245 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2246
2247 /* global vn counter - maximal Mbps for this vn */
2248 m_rs_vn.vn_counter.rate = vn_max_rate;
2249
2250 /* quota - number of bytes transmitted in this period */
2251 m_rs_vn.vn_counter.quota =
2252 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2253
8a1c38d1 2254 if (bp->vn_weight_sum) {
34f80b04
EG
2255 /* credit for each period of the fairness algorithm:
 2256 number of bytes in T_FAIR (the VNs share the port rate).
8a1c38d1
EG
2257 vn_weight_sum should not be larger than 10000, thus
2258 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2259 than zero */
34f80b04 2260 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2261 max((u32)(vn_min_rate * (T_FAIR_COEF /
2262 (8 * bp->vn_weight_sum))),
2263 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2264 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2265 m_fair_vn.vn_credit_delta);
2266 }
2267
34f80b04
EG
2268 /* Store it to internal memory */
2269 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2270 REG_WR(bp, BAR_XSTRORM_INTMEM +
2271 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2272 ((u32 *)(&m_rs_vn))[i]);
2273
2274 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2275 REG_WR(bp, BAR_XSTRORM_INTMEM +
2276 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2277 ((u32 *)(&m_fair_vn))[i]);
2278}
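/* A sketch of the per-VN quota math above: the max rate becomes a byte
 * quota per rate-shaping period, and the fairness credit delta is the
 * larger of the VN's proportional share and twice the fairness
 * threshold. All constants and rates below are assumptions. */
#include <stdio.h>

#define RS_PERIODIC_TIMEOUT_USEC 100		/* assumed */
#define T_FAIR_COEF              10000000	/* assumed */
#define FAIR_THRESHOLD           40000		/* assumed */

static unsigned int max_u32(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int vn_min_rate = 2500, vn_max_rate = 10000;	/* Mbps */
	unsigned int vn_weight_sum = 10000;	/* capped at 10000 per the comment */

	printf("quota        = %u bytes/period\n",
	       vn_max_rate * RS_PERIODIC_TIMEOUT_USEC / 8);
	printf("credit_delta = %u\n",
	       max_u32(vn_min_rate * (T_FAIR_COEF / (8 * vn_weight_sum)),
		       FAIR_THRESHOLD * 2));
	return 0;
}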
2279
8a1c38d1 2280
c18487ee
YR
2281/* This function is called upon link interrupt */
2282static void bnx2x_link_attn(struct bnx2x *bp)
2283{
bb2a0f7a
YG
2284 /* Make sure that we are synced with the current statistics */
2285 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2286
c18487ee 2287 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2288
bb2a0f7a
YG
2289 if (bp->link_vars.link_up) {
2290
1c06328c
EG
2291 /* dropless flow control */
2292 if (CHIP_IS_E1H(bp)) {
2293 int port = BP_PORT(bp);
2294 u32 pause_enabled = 0;
2295
2296 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2297 pause_enabled = 1;
2298
2299 REG_WR(bp, BAR_USTRORM_INTMEM +
2300 USTORM_PAUSE_ENABLED_OFFSET(port),
2301 pause_enabled);
2302 }
2303
bb2a0f7a
YG
2304 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2305 struct host_port_stats *pstats;
2306
2307 pstats = bnx2x_sp(bp, port_stats);
2308 /* reset old bmac stats */
2309 memset(&(pstats->mac_stx[0]), 0,
2310 sizeof(struct mac_stx));
2311 }
2312 if ((bp->state == BNX2X_STATE_OPEN) ||
2313 (bp->state == BNX2X_STATE_DISABLED))
2314 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2315 }
2316
c18487ee
YR
2317 /* indicate link status */
2318 bnx2x_link_report(bp);
34f80b04
EG
2319
2320 if (IS_E1HMF(bp)) {
8a1c38d1 2321 int port = BP_PORT(bp);
34f80b04 2322 int func;
8a1c38d1 2323 int vn;
34f80b04
EG
2324
2325 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2326 if (vn == BP_E1HVN(bp))
2327 continue;
2328
8a1c38d1 2329 func = ((vn << 1) | port);
34f80b04
EG
2330
2331 /* Set the attention towards other drivers
2332 on the same port */
2333 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2334 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2335 }
34f80b04 2336
8a1c38d1
EG
2337 if (bp->link_vars.link_up) {
2338 int i;
2339
2340 /* Init rate shaping and fairness contexts */
2341 bnx2x_init_port_minmax(bp);
34f80b04 2342
34f80b04 2343 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2344 bnx2x_init_vn_minmax(bp, 2*vn + port);
2345
2346 /* Store it to internal memory */
2347 for (i = 0;
2348 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2349 REG_WR(bp, BAR_XSTRORM_INTMEM +
2350 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2351 ((u32 *)(&bp->cmng))[i]);
2352 }
34f80b04 2353 }
c18487ee 2354}
a2fbb9ea 2355
c18487ee
YR
2356static void bnx2x__link_status_update(struct bnx2x *bp)
2357{
2358 if (bp->state != BNX2X_STATE_OPEN)
2359 return;
a2fbb9ea 2360
c18487ee 2361 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2362
bb2a0f7a
YG
2363 if (bp->link_vars.link_up)
2364 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2365 else
2366 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2367
c18487ee
YR
2368 /* indicate link status */
2369 bnx2x_link_report(bp);
a2fbb9ea 2370}
a2fbb9ea 2371
34f80b04
EG
2372static void bnx2x_pmf_update(struct bnx2x *bp)
2373{
2374 int port = BP_PORT(bp);
2375 u32 val;
2376
2377 bp->port.pmf = 1;
2378 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2379
2380 /* enable nig attention */
2381 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2382 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2383 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2384
2385 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2386}
2387
c18487ee 2388/* end of Link */
a2fbb9ea
ET
2389
2390/* slow path */
2391
2392/*
2393 * General service functions
2394 */
2395
2396/* the slow path queue is odd since completions arrive on the fastpath ring */
2397static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2398 u32 data_hi, u32 data_lo, int common)
2399{
34f80b04 2400 int func = BP_FUNC(bp);
a2fbb9ea 2401
34f80b04
EG
2402 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2403 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2404 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2405 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2406 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2407
2408#ifdef BNX2X_STOP_ON_ERROR
2409 if (unlikely(bp->panic))
2410 return -EIO;
2411#endif
2412
34f80b04 2413 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2414
2415 if (!bp->spq_left) {
2416 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2417 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2418 bnx2x_panic();
2419 return -EBUSY;
2420 }
f1410647 2421
a2fbb9ea
ET
 2422 /* CID needs the port number to be encoded in it */
2423 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2424 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2425 HW_CID(bp, cid)));
2426 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2427 if (common)
2428 bp->spq_prod_bd->hdr.type |=
2429 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2430
2431 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2432 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2433
2434 bp->spq_left--;
2435
2436 if (bp->spq_prod_bd == bp->spq_last_bd) {
2437 bp->spq_prod_bd = bp->spq;
2438 bp->spq_prod_idx = 0;
2439 DP(NETIF_MSG_TIMER, "end of spq\n");
2440
2441 } else {
2442 bp->spq_prod_bd++;
2443 bp->spq_prod_idx++;
2444 }
2445
37dbbf32
EG
2446 /* Make sure that BD data is updated before writing the producer */
2447 wmb();
2448
34f80b04 2449 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2450 bp->spq_prod_idx);
2451
37dbbf32
EG
2452 mmiowb();
2453
34f80b04 2454 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2455 return 0;
2456}
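/* A standalone model of the SPQ producer handling above: the producer
 * BD pointer walks the ring and snaps back to the base at the last
 * element, with the producer index reset on wrap, exactly as in the
 * branch at "end of spq". Ring size and payload are invented. */
#include <stdio.h>

#define SPQ_LEN 4

struct spe { int cmd; };

int main(void)
{
	struct spe spq[SPQ_LEN];
	struct spe *prod_bd = spq, *last_bd = &spq[SPQ_LEN - 1];
	unsigned int prod_idx = 0;
	int i;

	for (i = 0; i < 6; i++) {		/* six posts: wraps once */
		prod_bd->cmd = i;
		if (prod_bd == last_bd) {
			prod_bd = spq;		/* end of spq */
			prod_idx = 0;
		} else {
			prod_bd++;
			prod_idx++;
		}
		printf("posted %d, prod_idx %u\n", i, prod_idx);
	}
	return 0;
}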
2457
2458/* acquire split MCP access lock register */
4a37fb66 2459static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2460{
a2fbb9ea 2461 u32 i, j, val;
34f80b04 2462 int rc = 0;
a2fbb9ea
ET
2463
2464 might_sleep();
2465 i = 100;
2466 for (j = 0; j < i*10; j++) {
2467 val = (1UL << 31);
2468 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2469 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2470 if (val & (1L << 31))
2471 break;
2472
2473 msleep(5);
2474 }
a2fbb9ea 2475 if (!(val & (1L << 31))) {
19680c48 2476 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2477 rc = -EBUSY;
2478 }
2479
2480 return rc;
2481}
2482
4a37fb66
YG
2483/* release split MCP access lock register */
2484static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2485{
2486 u32 val = 0;
2487
2488 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2489}
2490
2491static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2492{
2493 struct host_def_status_block *def_sb = bp->def_status_blk;
2494 u16 rc = 0;
2495
2496 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2497 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2498 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2499 rc |= 1;
2500 }
2501 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2502 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2503 rc |= 2;
2504 }
2505 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2506 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2507 rc |= 4;
2508 }
2509 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2510 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2511 rc |= 8;
2512 }
2513 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2514 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2515 rc |= 16;
2516 }
2517 return rc;
2518}
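/* A worked example of the comparison above: each default status block
 * sub-index that moved since the last pass contributes one bit to the
 * return mask, telling the caller which storms posted new events. The
 * sample index values are made up. */
#include <stdio.h>

int main(void)
{
	unsigned short cached[5] = { 10, 7, 3, 0, 0 };	/* driver's copies */
	unsigned short hw[5]     = { 11, 7, 4, 0, 1 };	/* fresh from the SB */
	unsigned short rc = 0;
	int i;

	for (i = 0; i < 5; i++) {
		if (cached[i] != hw[i]) {
			cached[i] = hw[i];
			rc |= 1 << i;
		}
	}
	printf("update mask 0x%x\n", rc);	/* 0x15: entries 0, 2 and 4 moved */
	return 0;
}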
2519
2520/*
2521 * slow path service functions
2522 */
2523
2524static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2525{
34f80b04 2526 int port = BP_PORT(bp);
5c862848
EG
2527 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2528 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2529 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2530 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2531 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2532 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2533 u32 aeu_mask;
87942b46 2534 u32 nig_mask = 0;
a2fbb9ea 2535
a2fbb9ea
ET
2536 if (bp->attn_state & asserted)
2537 BNX2X_ERR("IGU ERROR\n");
2538
3fcaf2e5
EG
2539 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2540 aeu_mask = REG_RD(bp, aeu_addr);
2541
a2fbb9ea 2542 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2543 aeu_mask, asserted);
2544 aeu_mask &= ~(asserted & 0xff);
2545 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2546
3fcaf2e5
EG
2547 REG_WR(bp, aeu_addr, aeu_mask);
2548 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2549
3fcaf2e5 2550 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2551 bp->attn_state |= asserted;
3fcaf2e5 2552 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2553
2554 if (asserted & ATTN_HARD_WIRED_MASK) {
2555 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2556
a5e9a7cf
EG
2557 bnx2x_acquire_phy_lock(bp);
2558
877e9aa4 2559 /* save nig interrupt mask */
87942b46 2560 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2561 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2562
c18487ee 2563 bnx2x_link_attn(bp);
a2fbb9ea
ET
2564
2565 /* handle unicore attn? */
2566 }
2567 if (asserted & ATTN_SW_TIMER_4_FUNC)
2568 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2569
2570 if (asserted & GPIO_2_FUNC)
2571 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2572
2573 if (asserted & GPIO_3_FUNC)
2574 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2575
2576 if (asserted & GPIO_4_FUNC)
2577 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2578
2579 if (port == 0) {
2580 if (asserted & ATTN_GENERAL_ATTN_1) {
2581 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2582 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2583 }
2584 if (asserted & ATTN_GENERAL_ATTN_2) {
2585 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2586 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2587 }
2588 if (asserted & ATTN_GENERAL_ATTN_3) {
2589 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2590 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2591 }
2592 } else {
2593 if (asserted & ATTN_GENERAL_ATTN_4) {
2594 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2595 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2596 }
2597 if (asserted & ATTN_GENERAL_ATTN_5) {
2598 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2599 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2600 }
2601 if (asserted & ATTN_GENERAL_ATTN_6) {
2602 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2603 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2604 }
2605 }
2606
2607 } /* if hardwired */
2608
5c862848
EG
2609 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2610 asserted, hc_addr);
2611 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2612
2613 /* now set back the mask */
a5e9a7cf 2614 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2615 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2616 bnx2x_release_phy_lock(bp);
2617 }
a2fbb9ea
ET
2618}
2619
fd4ef40d
EG
2620static inline void bnx2x_fan_failure(struct bnx2x *bp)
2621{
2622 int port = BP_PORT(bp);
2623
2624 /* mark the failure */
2625 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2626 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2627 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2628 bp->link_params.ext_phy_config);
2629
2630 /* log the failure */
2631 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2632 " the driver to shutdown the card to prevent permanent"
2633 " damage. Please contact Dell Support for assistance\n",
2634 bp->dev->name);
2635}
877e9aa4 2636static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2637{
34f80b04 2638 int port = BP_PORT(bp);
877e9aa4 2639 int reg_offset;
4d295db0 2640 u32 val, swap_val, swap_override;
877e9aa4 2641
34f80b04
EG
2642 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2643 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2644
34f80b04 2645 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2646
2647 val = REG_RD(bp, reg_offset);
2648 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2649 REG_WR(bp, reg_offset, val);
2650
2651 BNX2X_ERR("SPIO5 hw attention\n");
2652
fd4ef40d 2653 /* Fan failure attention */
35b19ba5
EG
2654 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2655 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2656 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2657 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2658 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2659 /* The PHY reset is controlled by GPIO 1 */
2660 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2661 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2662 break;
2663
4d295db0
EG
2664 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2665 /* The PHY reset is controlled by GPIO 1 */
2666 /* fake the port number to cancel the swap done in
2667 set_gpio() */
2668 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2669 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2670 port = (swap_val && swap_override) ^ 1;
2671 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2672 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2673 break;
2674
877e9aa4
ET
2675 default:
2676 break;
2677 }
fd4ef40d 2678 bnx2x_fan_failure(bp);
877e9aa4 2679 }
34f80b04 2680
589abe3a
EG
2681 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2682 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2683 bnx2x_acquire_phy_lock(bp);
2684 bnx2x_handle_module_detect_int(&bp->link_params);
2685 bnx2x_release_phy_lock(bp);
2686 }
2687
34f80b04
EG
2688 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2689
2690 val = REG_RD(bp, reg_offset);
2691 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2692 REG_WR(bp, reg_offset, val);
2693
2694 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2695 (attn & HW_INTERRUT_ASSERT_SET_0));
2696 bnx2x_panic();
2697 }
877e9aa4
ET
2698}
2699
2700static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2701{
2702 u32 val;
2703
0626b899 2704 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2705
2706 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2707 BNX2X_ERR("DB hw attention 0x%x\n", val);
2708 /* DORQ discard attention */
2709 if (val & 0x2)
2710 BNX2X_ERR("FATAL error from DORQ\n");
2711 }
34f80b04
EG
2712
2713 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2714
2715 int port = BP_PORT(bp);
2716 int reg_offset;
2717
2718 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2719 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2720
2721 val = REG_RD(bp, reg_offset);
2722 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2723 REG_WR(bp, reg_offset, val);
2724
2725 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2726 (attn & HW_INTERRUT_ASSERT_SET_1));
2727 bnx2x_panic();
2728 }
877e9aa4
ET
2729}
2730
2731static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2732{
2733 u32 val;
2734
2735 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2736
2737 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2738 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2739 /* CFC error attention */
2740 if (val & 0x2)
2741 BNX2X_ERR("FATAL error from CFC\n");
2742 }
2743
2744 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2745
2746 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2747 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2748 /* RQ_USDMDP_FIFO_OVERFLOW */
2749 if (val & 0x18000)
2750 BNX2X_ERR("FATAL error from PXP\n");
2751 }
34f80b04
EG
2752
2753 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2754
2755 int port = BP_PORT(bp);
2756 int reg_offset;
2757
2758 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2759 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2760
2761 val = REG_RD(bp, reg_offset);
2762 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2763 REG_WR(bp, reg_offset, val);
2764
2765 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2766 (attn & HW_INTERRUT_ASSERT_SET_2));
2767 bnx2x_panic();
2768 }
877e9aa4
ET
2769}
2770
2771static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2772{
34f80b04
EG
2773 u32 val;
2774
877e9aa4
ET
2775 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2776
34f80b04
EG
2777 if (attn & BNX2X_PMF_LINK_ASSERT) {
2778 int func = BP_FUNC(bp);
2779
2780 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2781 bnx2x__link_status_update(bp);
2782 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2783 DRV_STATUS_PMF)
2784 bnx2x_pmf_update(bp);
2785
2786 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2787
2788 BNX2X_ERR("MC assert!\n");
2789 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2790 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2791 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2792 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2793 bnx2x_panic();
2794
2795 } else if (attn & BNX2X_MCP_ASSERT) {
2796
2797 BNX2X_ERR("MCP assert!\n");
2798 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2799 bnx2x_fw_dump(bp);
877e9aa4
ET
2800
2801 } else
2802 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2803 }
2804
2805 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2806 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2807 if (attn & BNX2X_GRC_TIMEOUT) {
2808 val = CHIP_IS_E1H(bp) ?
2809 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2810 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2811 }
2812 if (attn & BNX2X_GRC_RSV) {
2813 val = CHIP_IS_E1H(bp) ?
2814 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2815 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2816 }
877e9aa4 2817 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2818 }
2819}
2820
2821static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2822{
a2fbb9ea
ET
2823 struct attn_route attn;
2824 struct attn_route group_mask;
34f80b04 2825 int port = BP_PORT(bp);
877e9aa4 2826 int index;
a2fbb9ea
ET
2827 u32 reg_addr;
2828 u32 val;
3fcaf2e5 2829 u32 aeu_mask;
a2fbb9ea
ET
2830
2831 /* need to take HW lock because MCP or other port might also
2832 try to handle this event */
4a37fb66 2833 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2834
2835 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2836 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2837 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2838 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2839 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2840 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2841
2842 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2843 if (deasserted & (1 << index)) {
2844 group_mask = bp->attn_group[index];
2845
34f80b04
EG
2846 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2847 index, group_mask.sig[0], group_mask.sig[1],
2848 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2849
877e9aa4
ET
2850 bnx2x_attn_int_deasserted3(bp,
2851 attn.sig[3] & group_mask.sig[3]);
2852 bnx2x_attn_int_deasserted1(bp,
2853 attn.sig[1] & group_mask.sig[1]);
2854 bnx2x_attn_int_deasserted2(bp,
2855 attn.sig[2] & group_mask.sig[2]);
2856 bnx2x_attn_int_deasserted0(bp,
2857 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2858
a2fbb9ea
ET
2859 if ((attn.sig[0] & group_mask.sig[0] &
2860 HW_PRTY_ASSERT_SET_0) ||
2861 (attn.sig[1] & group_mask.sig[1] &
2862 HW_PRTY_ASSERT_SET_1) ||
2863 (attn.sig[2] & group_mask.sig[2] &
2864 HW_PRTY_ASSERT_SET_2))
6378c025 2865 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2866 }
2867 }
2868
4a37fb66 2869 bnx2x_release_alr(bp);
a2fbb9ea 2870
5c862848 2871 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2872
2873 val = ~deasserted;
3fcaf2e5
EG
2874 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2875 val, reg_addr);
5c862848 2876 REG_WR(bp, reg_addr, val);
a2fbb9ea 2877
a2fbb9ea 2878 if (~bp->attn_state & deasserted)
3fcaf2e5 2879 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2880
2881 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2882 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2883
3fcaf2e5
EG
2884 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2885 aeu_mask = REG_RD(bp, reg_addr);
2886
2887 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2888 aeu_mask, deasserted);
2889 aeu_mask |= (deasserted & 0xff);
2890 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2891
3fcaf2e5
EG
2892 REG_WR(bp, reg_addr, aeu_mask);
2893 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2894
2895 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2896 bp->attn_state &= ~deasserted;
2897 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2898}
2899
2900static void bnx2x_attn_int(struct bnx2x *bp)
2901{
2902 /* read local copy of bits */
68d59484
EG
2903 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2904 attn_bits);
2905 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2906 attn_bits_ack);
a2fbb9ea
ET
2907 u32 attn_state = bp->attn_state;
2908
2909 /* look for changed bits */
2910 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2911 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2912
2913 DP(NETIF_MSG_HW,
2914 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2915 attn_bits, attn_ack, asserted, deasserted);
2916
2917 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2918 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2919
2920 /* handle bits that were raised */
2921 if (asserted)
2922 bnx2x_attn_int_asserted(bp, asserted);
2923
2924 if (deasserted)
2925 bnx2x_attn_int_deasserted(bp, deasserted);
2926}
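/* A worked example of the edge detection above: a bit counts as newly
 * asserted when raised in attn_bits but absent from both the ack and
 * the recorded state, and as deasserted in the opposite case. Sample
 * values are illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned int attn_bits = 0x5;	/* bits 0 and 2 currently high */
	unsigned int attn_ack  = 0x6;	/* bits 1 and 2 already acked */
	unsigned int state     = 0x6;	/* bits 1 and 2 recorded as asserted */

	unsigned int asserted   = attn_bits & ~attn_ack & ~state;
	unsigned int deasserted = ~attn_bits & attn_ack & state;

	/* bit 0 newly asserted, bit 1 deasserted, bit 2 unchanged */
	printf("asserted 0x%x, deasserted 0x%x\n", asserted, deasserted);
	return 0;
}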
2927
2928static void bnx2x_sp_task(struct work_struct *work)
2929{
1cf167f2 2930 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2931 u16 status;
2932
34f80b04 2933
a2fbb9ea
ET
2934 /* Return here if interrupt is disabled */
2935 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2936 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2937 return;
2938 }
2939
2940 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2941/* if (status == 0) */
2942/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2943
3196a88a 2944 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2945
877e9aa4
ET
2946 /* HW attentions */
2947 if (status & 0x1)
a2fbb9ea 2948 bnx2x_attn_int(bp);
a2fbb9ea 2949
68d59484 2950 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2951 IGU_INT_NOP, 1);
2952 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2953 IGU_INT_NOP, 1);
2954 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2955 IGU_INT_NOP, 1);
2956 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2957 IGU_INT_NOP, 1);
2958 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2959 IGU_INT_ENABLE, 1);
877e9aa4 2960
a2fbb9ea
ET
2961}
2962
2963static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2964{
2965 struct net_device *dev = dev_instance;
2966 struct bnx2x *bp = netdev_priv(dev);
2967
2968 /* Return here if interrupt is disabled */
2969 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2970 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2971 return IRQ_HANDLED;
2972 }
2973
8d9c5f34 2974 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2975
2976#ifdef BNX2X_STOP_ON_ERROR
2977 if (unlikely(bp->panic))
2978 return IRQ_HANDLED;
2979#endif
2980
1cf167f2 2981 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2982
2983 return IRQ_HANDLED;
2984}
2985
2986/* end of slow path */
2987
2988/* Statistics */
2989
2990/****************************************************************************
2991* Macros
2992****************************************************************************/
2993
a2fbb9ea
ET
2994/* sum[hi:lo] += add[hi:lo] */
2995#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2996 do { \
2997 s_lo += a_lo; \
f5ba6772 2998 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
2999 } while (0)
3000
3001/* difference = minuend - subtrahend */
3002#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3003 do { \
bb2a0f7a
YG
3004 if (m_lo < s_lo) { \
3005 /* underflow */ \
a2fbb9ea 3006 d_hi = m_hi - s_hi; \
bb2a0f7a 3007 if (d_hi > 0) { \
6378c025 3008 /* we can 'loan' 1 */ \
a2fbb9ea
ET
3009 d_hi--; \
3010 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3011 } else { \
6378c025 3012 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3013 d_hi = 0; \
3014 d_lo = 0; \
3015 } \
bb2a0f7a
YG
3016 } else { \
3017 /* m_lo >= s_lo */ \
a2fbb9ea 3018 if (m_hi < s_hi) { \
bb2a0f7a
YG
3019 d_hi = 0; \
3020 d_lo = 0; \
3021 } else { \
6378c025 3022 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3023 d_hi = m_hi - s_hi; \
3024 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3025 } \
3026 } \
3027 } while (0)
3028
bb2a0f7a 3029#define UPDATE_STAT64(s, t) \
a2fbb9ea 3030 do { \
bb2a0f7a
YG
3031 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3032 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3033 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3034 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3035 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3036 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3037 } while (0)
3038
bb2a0f7a 3039#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3040 do { \
bb2a0f7a
YG
3041 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3042 diff.lo, new->s##_lo, old->s##_lo); \
3043 ADD_64(estats->t##_hi, diff.hi, \
3044 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3045 } while (0)
3046
3047/* sum[hi:lo] += add */
3048#define ADD_EXTEND_64(s_hi, s_lo, a) \
3049 do { \
3050 s_lo += a; \
3051 s_hi += (s_lo < a) ? 1 : 0; \
3052 } while (0)
3053
bb2a0f7a 3054#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3055 do { \
bb2a0f7a
YG
3056 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3057 pstats->mac_stx[1].s##_lo, \
3058 new->s); \
a2fbb9ea
ET
3059 } while (0)
3060
bb2a0f7a 3061#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3062 do { \
4781bfad
EG
3063 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3064 old_tclient->s = tclient->s; \
de832a55
EG
3065 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3066 } while (0)
3067
3068#define UPDATE_EXTEND_USTAT(s, t) \
3069 do { \
3070 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3071 old_uclient->s = uclient->s; \
3072 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3073 } while (0)
3074
3075#define UPDATE_EXTEND_XSTAT(s, t) \
3076 do { \
4781bfad
EG
3077 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3078 old_xclient->s = xclient->s; \
de832a55
EG
3079 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3080 } while (0)
3081
3082/* minuend -= subtrahend */
3083#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3084 do { \
3085 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3086 } while (0)
3087
3088/* minuend[hi:lo] -= subtrahend */
3089#define SUB_EXTEND_64(m_hi, m_lo, s) \
3090 do { \
3091 SUB_64(m_hi, 0, m_lo, s); \
3092 } while (0)
3093
3094#define SUB_EXTEND_USTAT(s, t) \
3095 do { \
3096 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3097 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3098 } while (0)
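/* A quick standalone check of the split 64-bit helpers above, driving
 * ADD_64 across a 32-bit boundary to exercise the carry term. The
 * macro body is copied verbatim from the listing. */
#include <stdio.h>
#include <limits.h>

#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

int main(void)
{
	unsigned int s_hi = 0, s_lo = UINT_MAX;

	ADD_64(s_hi, 0, s_lo, 1);	/* low word wraps; carry into high */
	printf("sum = %u:%u (expect 1:0)\n", s_hi, s_lo);
	return 0;
}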
3099
3100/*
3101 * General service functions
3102 */
3103
3104static inline long bnx2x_hilo(u32 *hiref)
3105{
3106 u32 lo = *(hiref + 1);
3107#if (BITS_PER_LONG == 64)
3108 u32 hi = *hiref;
3109
3110 return HILO_U64(hi, lo);
3111#else
3112 return lo;
3113#endif
3114}
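/* A sketch of the hi:lo readout above: statistics are stored as two
 * adjacent u32 words with the high word first, so 64-bit builds fold
 * them into one value while 32-bit builds return only the low word.
 * The __SIZEOF_LONG__ test stands in for BITS_PER_LONG here. */
#include <stdio.h>
#include <stdint.h>

static long hilo(uint32_t *hiref)
{
	uint32_t lo = *(hiref + 1);
#if (__SIZEOF_LONG__ == 8)
	return (long)(((uint64_t)*hiref << 32) | lo);
#else
	return lo;
#endif
}

int main(void)
{
	uint32_t stat[2] = { 0x1, 0x80000000u };	/* hi, lo */

	printf("value = 0x%lx\n", hilo(stat));	/* 0x180000000 on 64-bit */
	return 0;
}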
3115
3116/*
3117 * Init service functions
3118 */
3119
bb2a0f7a
YG
3120static void bnx2x_storm_stats_post(struct bnx2x *bp)
3121{
3122 if (!bp->stats_pending) {
3123 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3124 int i, rc;
bb2a0f7a
YG
3125
3126 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3127 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3128 for_each_queue(bp, i)
3129 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3130
3131 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3132 ((u32 *)&ramrod_data)[1],
3133 ((u32 *)&ramrod_data)[0], 0);
3134 if (rc == 0) {
 3135 /* the stats ramrod has its own slot on the spq */
3136 bp->spq_left++;
3137 bp->stats_pending = 1;
3138 }
3139 }
3140}
3141
3142static void bnx2x_stats_init(struct bnx2x *bp)
3143{
3144 int port = BP_PORT(bp);
de832a55 3145 int i;
bb2a0f7a 3146
de832a55 3147 bp->stats_pending = 0;
bb2a0f7a
YG
3148 bp->executer_idx = 0;
3149 bp->stats_counter = 0;
3150
3151 /* port stats */
3152 if (!BP_NOMCP(bp))
3153 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3154 else
3155 bp->port.port_stx = 0;
3156 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3157
3158 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3159 bp->port.old_nig_stats.brb_discard =
3160 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3161 bp->port.old_nig_stats.brb_truncate =
3162 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3163 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3164 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3165 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3166 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3167
3168 /* function stats */
de832a55
EG
3169 for_each_queue(bp, i) {
3170 struct bnx2x_fastpath *fp = &bp->fp[i];
3171
3172 memset(&fp->old_tclient, 0,
3173 sizeof(struct tstorm_per_client_stats));
3174 memset(&fp->old_uclient, 0,
3175 sizeof(struct ustorm_per_client_stats));
3176 memset(&fp->old_xclient, 0,
3177 sizeof(struct xstorm_per_client_stats));
3178 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3179 }
3180
bb2a0f7a 3181 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
bb2a0f7a
YG
3182 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3183
3184 bp->stats_state = STATS_STATE_DISABLED;
3185 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3186 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3187}
3188
3189static void bnx2x_hw_stats_post(struct bnx2x *bp)
3190{
3191 struct dmae_command *dmae = &bp->stats_dmae;
3192 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3193
3194 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3195 if (CHIP_REV_IS_SLOW(bp))
3196 return;
bb2a0f7a
YG
3197
3198 /* loader */
3199 if (bp->executer_idx) {
3200 int loader_idx = PMF_DMAE_C(bp);
3201
3202 memset(dmae, 0, sizeof(struct dmae_command));
3203
3204 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3205 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3206 DMAE_CMD_DST_RESET |
3207#ifdef __BIG_ENDIAN
3208 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3209#else
3210 DMAE_CMD_ENDIANITY_DW_SWAP |
3211#endif
3212 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3213 DMAE_CMD_PORT_0) |
3214 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3215 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3216 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3217 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3218 sizeof(struct dmae_command) *
3219 (loader_idx + 1)) >> 2;
3220 dmae->dst_addr_hi = 0;
3221 dmae->len = sizeof(struct dmae_command) >> 2;
3222 if (CHIP_IS_E1(bp))
3223 dmae->len--;
3224 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3225 dmae->comp_addr_hi = 0;
3226 dmae->comp_val = 1;
3227
3228 *stats_comp = 0;
3229 bnx2x_post_dmae(bp, dmae, loader_idx);
3230
3231 } else if (bp->func_stx) {
3232 *stats_comp = 0;
3233 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3234 }
3235}
3236
3237static int bnx2x_stats_comp(struct bnx2x *bp)
3238{
3239 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3240 int cnt = 10;
3241
3242 might_sleep();
3243 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3244 if (!cnt) {
3245 BNX2X_ERR("timeout waiting for stats finished\n");
3246 break;
3247 }
3248 cnt--;
12469401 3249 msleep(1);
bb2a0f7a
YG
3250 }
3251 return 1;
3252}
3253
3254/*
3255 * Statistics service functions
3256 */
3257
3258static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3259{
3260 struct dmae_command *dmae;
3261 u32 opcode;
3262 int loader_idx = PMF_DMAE_C(bp);
3263 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3264
3265 /* sanity */
3266 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3267 BNX2X_ERR("BUG!\n");
3268 return;
3269 }
3270
3271 bp->executer_idx = 0;
3272
3273 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3274 DMAE_CMD_C_ENABLE |
3275 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3276#ifdef __BIG_ENDIAN
3277 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3278#else
3279 DMAE_CMD_ENDIANITY_DW_SWAP |
3280#endif
3281 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3282 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3283
3284 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3285 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3286 dmae->src_addr_lo = bp->port.port_stx >> 2;
3287 dmae->src_addr_hi = 0;
3288 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3289 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3290 dmae->len = DMAE_LEN32_RD_MAX;
3291 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3292 dmae->comp_addr_hi = 0;
3293 dmae->comp_val = 1;
3294
3295 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3296 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3297 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3298 dmae->src_addr_hi = 0;
7a9b2557
VZ
3299 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3300 DMAE_LEN32_RD_MAX * 4);
3301 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3302 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3303 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3304 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3305 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3306 dmae->comp_val = DMAE_COMP_VAL;
3307
3308 *stats_comp = 0;
3309 bnx2x_hw_stats_post(bp);
3310 bnx2x_stats_comp(bp);
3311}
3312
3313static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3314{
3315 struct dmae_command *dmae;
34f80b04 3316 int port = BP_PORT(bp);
bb2a0f7a 3317 int vn = BP_E1HVN(bp);
a2fbb9ea 3318 u32 opcode;
bb2a0f7a 3319 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3320 u32 mac_addr;
bb2a0f7a
YG
3321 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3322
3323 /* sanity */
3324 if (!bp->link_vars.link_up || !bp->port.pmf) {
3325 BNX2X_ERR("BUG!\n");
3326 return;
3327 }
a2fbb9ea
ET
3328
3329 bp->executer_idx = 0;
bb2a0f7a
YG
3330
3331 /* MCP */
3332 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3333 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3334 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3335#ifdef __BIG_ENDIAN
bb2a0f7a 3336 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3337#else
bb2a0f7a 3338 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3339#endif
bb2a0f7a
YG
3340 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3341 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3342
bb2a0f7a 3343 if (bp->port.port_stx) {
a2fbb9ea
ET
3344
3345 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3346 dmae->opcode = opcode;
bb2a0f7a
YG
3347 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3348 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3349 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3350 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3351 dmae->len = sizeof(struct host_port_stats) >> 2;
3352 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3353 dmae->comp_addr_hi = 0;
3354 dmae->comp_val = 1;
a2fbb9ea
ET
3355 }
3356
bb2a0f7a
YG
3357 if (bp->func_stx) {
3358
3359 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3360 dmae->opcode = opcode;
3361 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3362 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3363 dmae->dst_addr_lo = bp->func_stx >> 2;
3364 dmae->dst_addr_hi = 0;
3365 dmae->len = sizeof(struct host_func_stats) >> 2;
3366 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3367 dmae->comp_addr_hi = 0;
3368 dmae->comp_val = 1;
a2fbb9ea
ET
3369 }
3370
bb2a0f7a 3371 /* MAC */
a2fbb9ea
ET
3372 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3373 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3374 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3375#ifdef __BIG_ENDIAN
3376 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3377#else
3378 DMAE_CMD_ENDIANITY_DW_SWAP |
3379#endif
bb2a0f7a
YG
3380 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3381 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3382
c18487ee 3383 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3384
3385 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3386 NIG_REG_INGRESS_BMAC0_MEM);
3387
3388 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3389 BIGMAC_REGISTER_TX_STAT_GTBYT */
3390 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3391 dmae->opcode = opcode;
3392 dmae->src_addr_lo = (mac_addr +
3393 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3394 dmae->src_addr_hi = 0;
3395 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3396 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3397 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3398 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3399 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3400 dmae->comp_addr_hi = 0;
3401 dmae->comp_val = 1;
3402
3403 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3404 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3405 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3406 dmae->opcode = opcode;
3407 dmae->src_addr_lo = (mac_addr +
3408 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3409 dmae->src_addr_hi = 0;
3410 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3411 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3412 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3413 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3414 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3415 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3416 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3417 dmae->comp_addr_hi = 0;
3418 dmae->comp_val = 1;
3419
c18487ee 3420 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3421
3422 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3423
3424 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3425 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3426 dmae->opcode = opcode;
3427 dmae->src_addr_lo = (mac_addr +
3428 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3429 dmae->src_addr_hi = 0;
3430 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3431 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3432 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3433 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3434 dmae->comp_addr_hi = 0;
3435 dmae->comp_val = 1;
3436
3437 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3438 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3439 dmae->opcode = opcode;
3440 dmae->src_addr_lo = (mac_addr +
3441 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3442 dmae->src_addr_hi = 0;
3443 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3444 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3445 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3446 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3447 dmae->len = 1;
3448 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3449 dmae->comp_addr_hi = 0;
3450 dmae->comp_val = 1;
3451
3452 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3453 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3454 dmae->opcode = opcode;
3455 dmae->src_addr_lo = (mac_addr +
3456 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3457 dmae->src_addr_hi = 0;
3458 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3459 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3460 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3461 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3462 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3463 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3464 dmae->comp_addr_hi = 0;
3465 dmae->comp_val = 1;
3466 }
3467
3468 /* NIG */
3469 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3470 dmae->opcode = opcode;
3471 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3472 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3473 dmae->src_addr_hi = 0;
3474 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3475 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3476 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3477 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3478 dmae->comp_addr_hi = 0;
3479 dmae->comp_val = 1;
3480
3481 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3482 dmae->opcode = opcode;
3483 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3484 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3485 dmae->src_addr_hi = 0;
3486 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3487 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3488 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3489 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3490 dmae->len = (2*sizeof(u32)) >> 2;
3491 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3492 dmae->comp_addr_hi = 0;
3493 dmae->comp_val = 1;
3494
3495 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3496 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3497 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3498 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3499#ifdef __BIG_ENDIAN
3500 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3501#else
3502 DMAE_CMD_ENDIANITY_DW_SWAP |
3503#endif
3504 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3505 (vn << DMAE_CMD_E1HVN_SHIFT));
3506 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3507 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3508 dmae->src_addr_hi = 0;
3509 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3510 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3511 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3512 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3513 dmae->len = (2*sizeof(u32)) >> 2;
3514 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3515 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3516 dmae->comp_val = DMAE_COMP_VAL;
3517
3518 *stats_comp = 0;
3519}
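/* Annotation (a sketch of the mechanism, inferred from the fields set
 * above): every DMAE command except the last completes by writing 1
 * into the next loader GO register (dmae_reg_go_c[loader_idx]), which
 * appears to kick the following command in the chain; only the final
 * command writes DMAE_COMP_VAL into stats_comp, so the driver can poll
 * a single completion word for the whole sequence.
 */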
3520
bb2a0f7a 3521static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3522{
3523 struct dmae_command *dmae = &bp->stats_dmae;
3524 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3525
3526 /* sanity */
3527 if (!bp->func_stx) {
3528 BNX2X_ERR("BUG!\n");
3529 return;
3530 }
a2fbb9ea 3531
3532 bp->executer_idx = 0;
3533 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3534
3535 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3536 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3537 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3538#ifdef __BIG_ENDIAN
3539 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3540#else
3541 DMAE_CMD_ENDIANITY_DW_SWAP |
3542#endif
3543 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3544 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3545 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3546 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3547 dmae->dst_addr_lo = bp->func_stx >> 2;
3548 dmae->dst_addr_hi = 0;
3549 dmae->len = sizeof(struct host_func_stats) >> 2;
3550 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3551 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3552 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3553
3554 *stats_comp = 0;
3555}
a2fbb9ea 3556
3557static void bnx2x_stats_start(struct bnx2x *bp)
3558{
3559 if (bp->port.pmf)
3560 bnx2x_port_stats_init(bp);
3561
3562 else if (bp->func_stx)
3563 bnx2x_func_stats_init(bp);
3564
3565 bnx2x_hw_stats_post(bp);
3566 bnx2x_storm_stats_post(bp);
3567}
3568
3569static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3570{
3571 bnx2x_stats_comp(bp);
3572 bnx2x_stats_pmf_update(bp);
3573 bnx2x_stats_start(bp);
3574}
3575
3576static void bnx2x_stats_restart(struct bnx2x *bp)
3577{
3578 bnx2x_stats_comp(bp);
3579 bnx2x_stats_start(bp);
3580}
3581
3582static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3583{
3584 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3585 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3586 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3587 struct {
3588 u32 lo;
3589 u32 hi;
3590 } diff;
3591
3592 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3593 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3594 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3595 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3596 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3597 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3598 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3599 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3600 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3601 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3602 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3603 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3604 UPDATE_STAT64(tx_stat_gt127,
3605 tx_stat_etherstatspkts65octetsto127octets);
3606 UPDATE_STAT64(tx_stat_gt255,
3607 tx_stat_etherstatspkts128octetsto255octets);
3608 UPDATE_STAT64(tx_stat_gt511,
3609 tx_stat_etherstatspkts256octetsto511octets);
3610 UPDATE_STAT64(tx_stat_gt1023,
3611 tx_stat_etherstatspkts512octetsto1023octets);
3612 UPDATE_STAT64(tx_stat_gt1518,
3613 tx_stat_etherstatspkts1024octetsto1522octets);
3614 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3615 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3616 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3617 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3618 UPDATE_STAT64(tx_stat_gterr,
3619 tx_stat_dot3statsinternalmactransmiterrors);
3620 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3621
3622 estats->pause_frames_received_hi =
3623 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3624 estats->pause_frames_received_lo =
3625 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3626
3627 estats->pause_frames_sent_hi =
3628 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3629 estats->pause_frames_sent_lo =
3630 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3631}
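/* Annotation, assuming the UPDATE_STAT64 definition in bnx2x.h: the
 * macro roughly computes diff = new - snapshot on split hi/lo words,
 * refreshes the snapshot (mac_stx[0]) and adds diff into the running
 * total (mac_stx[1]); the "struct { u32 lo; u32 hi; } diff" declared
 * above is its scratch space. That is also why one hardware counter
 * (e.g. rx_stat_grxpf) can feed two driver statistics here.
 */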
3632
3633static void bnx2x_emac_stats_update(struct bnx2x *bp)
3634{
3635 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3636 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3637 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3638
3639 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3640 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3641 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3642 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3643 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3644 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3645 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3646 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3647 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3648 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3649 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3650 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3651 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3652 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3653 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3654 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3655 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3656 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3657 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3658 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3659 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3660 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3661 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3662 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3663 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3664 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3665 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3666 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3667 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3668 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3669 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3670
3671 estats->pause_frames_received_hi =
3672 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3673 estats->pause_frames_received_lo =
3674 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3675 ADD_64(estats->pause_frames_received_hi,
3676 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3677 estats->pause_frames_received_lo,
3678 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3679
3680 estats->pause_frames_sent_hi =
3681 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3682 estats->pause_frames_sent_lo =
3683 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3684 ADD_64(estats->pause_frames_sent_hi,
3685 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3686 estats->pause_frames_sent_lo,
3687 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3688}
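/* Annotation: ADD_64, as defined in bnx2x.h, is roughly
 *
 *	s_lo += a_lo;
 *	s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);
 *
 * i.e. 64-bit addition with a manual carry across two u32 halves,
 * which is how the XON and XOFF pause counters above are folded into
 * the single pause_frames_received/sent pair.
 */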
3689
3690static int bnx2x_hw_stats_update(struct bnx2x *bp)
3691{
3692 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3693 struct nig_stats *old = &(bp->port.old_nig_stats);
3694 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3695 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3696 struct {
3697 u32 lo;
3698 u32 hi;
3699 } diff;
de832a55 3700 u32 nig_timer_max;
3701
3702 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3703 bnx2x_bmac_stats_update(bp);
3704
3705 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3706 bnx2x_emac_stats_update(bp);
3707
3708 else { /* unreached */
c3eefaf6 3709 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3710 return -1;
3711 }
a2fbb9ea 3712
3713 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3714 new->brb_discard - old->brb_discard);
3715 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3716 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3717
3718 UPDATE_STAT64_NIG(egress_mac_pkt0,
3719 etherstatspkts1024octetsto1522octets);
3720 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3721
bb2a0f7a 3722 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3723
3724 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3725 sizeof(struct mac_stx));
3726 estats->brb_drop_hi = pstats->brb_drop_hi;
3727 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3728
bb2a0f7a 3729 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3730
3731 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3732 if (nig_timer_max != estats->nig_timer_max) {
3733 estats->nig_timer_max = nig_timer_max;
3734 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3735 }
3736
bb2a0f7a 3737 return 0;
3738}
3739
bb2a0f7a 3740static int bnx2x_storm_stats_update(struct bnx2x *bp)
3741{
3742 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3743 struct tstorm_per_port_stats *tport =
de832a55 3744 &stats->tstorm_common.port_statistics;
3745 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3746 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3747 int i;
3748
3749 memset(&(fstats->total_bytes_received_hi), 0,
3750 sizeof(struct host_func_stats) - 2*sizeof(u32));
3751 estats->error_bytes_received_hi = 0;
3752 estats->error_bytes_received_lo = 0;
3753 estats->etherstatsoverrsizepkts_hi = 0;
3754 estats->etherstatsoverrsizepkts_lo = 0;
3755 estats->no_buff_discard_hi = 0;
3756 estats->no_buff_discard_lo = 0;
a2fbb9ea 3757
3758 for_each_queue(bp, i) {
3759 struct bnx2x_fastpath *fp = &bp->fp[i];
3760 int cl_id = fp->cl_id;
3761 struct tstorm_per_client_stats *tclient =
3762 &stats->tstorm_common.client_statistics[cl_id];
3763 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3764 struct ustorm_per_client_stats *uclient =
3765 &stats->ustorm_common.client_statistics[cl_id];
3766 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3767 struct xstorm_per_client_stats *xclient =
3768 &stats->xstorm_common.client_statistics[cl_id];
3769 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3770 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3771 u32 diff;
3772
3773 /* are storm stats valid? */
3774 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 3775 bp->stats_counter) {
3776 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3777 " xstorm counter (%d) != stats_counter (%d)\n",
3778 i, xclient->stats_counter, bp->stats_counter);
3779 return -1;
3780 }
3781 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 3782 bp->stats_counter) {
3783 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3784 " tstorm counter (%d) != stats_counter (%d)\n",
3785 i, tclient->stats_counter, bp->stats_counter);
3786 return -2;
3787 }
3788 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3789 bp->stats_counter) {
3790 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3791 " ustorm counter (%d) != stats_counter (%d)\n",
3792 i, uclient->stats_counter, bp->stats_counter);
3793 return -4;
3794 }
a2fbb9ea 3795
3796 qstats->total_bytes_received_hi =
3797 qstats->valid_bytes_received_hi =
a2fbb9ea 3798 le32_to_cpu(tclient->total_rcv_bytes.hi);
3799 qstats->total_bytes_received_lo =
3800 qstats->valid_bytes_received_lo =
a2fbb9ea 3801 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a 3802
de832a55 3803 qstats->error_bytes_received_hi =
bb2a0f7a 3804 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 3805 qstats->error_bytes_received_lo =
bb2a0f7a 3806 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 3807
3808 ADD_64(qstats->total_bytes_received_hi,
3809 qstats->error_bytes_received_hi,
3810 qstats->total_bytes_received_lo,
3811 qstats->error_bytes_received_lo);
3812
3813 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3814 total_unicast_packets_received);
3815 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3816 total_multicast_packets_received);
3817 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3818 total_broadcast_packets_received);
3819 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3820 etherstatsoverrsizepkts);
3821 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3822
3823 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3824 total_unicast_packets_received);
3825 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3826 total_multicast_packets_received);
3827 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3828 total_broadcast_packets_received);
3829 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3830 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3831 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3832
3833 qstats->total_bytes_transmitted_hi =
bb2a0f7a 3834 le32_to_cpu(xclient->total_sent_bytes.hi);
de832a55 3835 qstats->total_bytes_transmitted_lo =
3836 le32_to_cpu(xclient->total_sent_bytes.lo);
3837
3838 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3839 total_unicast_packets_transmitted);
3840 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3841 total_multicast_packets_transmitted);
3842 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3843 total_broadcast_packets_transmitted);
3844
3845 old_tclient->checksum_discard = tclient->checksum_discard;
3846 old_tclient->ttl0_discard = tclient->ttl0_discard;
3847
3848 ADD_64(fstats->total_bytes_received_hi,
3849 qstats->total_bytes_received_hi,
3850 fstats->total_bytes_received_lo,
3851 qstats->total_bytes_received_lo);
3852 ADD_64(fstats->total_bytes_transmitted_hi,
3853 qstats->total_bytes_transmitted_hi,
3854 fstats->total_bytes_transmitted_lo,
3855 qstats->total_bytes_transmitted_lo);
3856 ADD_64(fstats->total_unicast_packets_received_hi,
3857 qstats->total_unicast_packets_received_hi,
3858 fstats->total_unicast_packets_received_lo,
3859 qstats->total_unicast_packets_received_lo);
3860 ADD_64(fstats->total_multicast_packets_received_hi,
3861 qstats->total_multicast_packets_received_hi,
3862 fstats->total_multicast_packets_received_lo,
3863 qstats->total_multicast_packets_received_lo);
3864 ADD_64(fstats->total_broadcast_packets_received_hi,
3865 qstats->total_broadcast_packets_received_hi,
3866 fstats->total_broadcast_packets_received_lo,
3867 qstats->total_broadcast_packets_received_lo);
3868 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3869 qstats->total_unicast_packets_transmitted_hi,
3870 fstats->total_unicast_packets_transmitted_lo,
3871 qstats->total_unicast_packets_transmitted_lo);
3872 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3873 qstats->total_multicast_packets_transmitted_hi,
3874 fstats->total_multicast_packets_transmitted_lo,
3875 qstats->total_multicast_packets_transmitted_lo);
3876 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3877 qstats->total_broadcast_packets_transmitted_hi,
3878 fstats->total_broadcast_packets_transmitted_lo,
3879 qstats->total_broadcast_packets_transmitted_lo);
3880 ADD_64(fstats->valid_bytes_received_hi,
3881 qstats->valid_bytes_received_hi,
3882 fstats->valid_bytes_received_lo,
3883 qstats->valid_bytes_received_lo);
3884
3885 ADD_64(estats->error_bytes_received_hi,
3886 qstats->error_bytes_received_hi,
3887 estats->error_bytes_received_lo,
3888 qstats->error_bytes_received_lo);
3889 ADD_64(estats->etherstatsoverrsizepkts_hi,
3890 qstats->etherstatsoverrsizepkts_hi,
3891 estats->etherstatsoverrsizepkts_lo,
3892 qstats->etherstatsoverrsizepkts_lo);
3893 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3894 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3895 }
3896
3897 ADD_64(fstats->total_bytes_received_hi,
3898 estats->rx_stat_ifhcinbadoctets_hi,
3899 fstats->total_bytes_received_lo,
3900 estats->rx_stat_ifhcinbadoctets_lo);
3901
3902 memcpy(estats, &(fstats->total_bytes_received_hi),
3903 sizeof(struct host_func_stats) - 2*sizeof(u32));
3904
3905 ADD_64(estats->etherstatsoverrsizepkts_hi,
3906 estats->rx_stat_dot3statsframestoolong_hi,
3907 estats->etherstatsoverrsizepkts_lo,
3908 estats->rx_stat_dot3statsframestoolong_lo);
3909 ADD_64(estats->error_bytes_received_hi,
3910 estats->rx_stat_ifhcinbadoctets_hi,
3911 estats->error_bytes_received_lo,
3912 estats->rx_stat_ifhcinbadoctets_lo);
3913
3914 if (bp->port.pmf) {
3915 estats->mac_filter_discard =
3916 le32_to_cpu(tport->mac_filter_discard);
3917 estats->xxoverflow_discard =
3918 le32_to_cpu(tport->xxoverflow_discard);
3919 estats->brb_truncate_discard =
bb2a0f7a 3920 le32_to_cpu(tport->brb_truncate_discard);
3921 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3922 }
3923
3924 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 3925
3926 bp->stats_pending = 0;
3927
3928 return 0;
3929}
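/* Annotation: the stats_counter checks above make the update
 * transactional - each storm stamps its per-client block with the
 * number of the query it answered, and the host consumes the block
 * only once the x/t/u-storms have all caught up with bp->stats_counter.
 * A stale block simply returns non-zero so the whole update is
 * retried on the next tick (see the bp->stats_pending handling in
 * bnx2x_stats_update() below).
 */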
3930
bb2a0f7a 3931static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3932{
bb2a0f7a 3933 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3934 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 3935 int i;
3936
3937 nstats->rx_packets =
3938 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3939 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3940 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3941
3942 nstats->tx_packets =
3943 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3944 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3945 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3946
de832a55 3947 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 3948
0e39e645 3949 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3950
3951 nstats->rx_dropped = estats->mac_discard;
3952 for_each_queue(bp, i)
3953 nstats->rx_dropped +=
3954 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3955
3956 nstats->tx_dropped = 0;
3957
3958 nstats->multicast =
de832a55 3959 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 3960
bb2a0f7a 3961 nstats->collisions =
de832a55 3962 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3963
3964 nstats->rx_length_errors =
3965 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3966 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3967 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3968 bnx2x_hilo(&estats->brb_truncate_hi);
3969 nstats->rx_crc_errors =
3970 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3971 nstats->rx_frame_errors =
3972 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3973 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3974 nstats->rx_missed_errors = estats->xxoverflow_discard;
3975
3976 nstats->rx_errors = nstats->rx_length_errors +
3977 nstats->rx_over_errors +
3978 nstats->rx_crc_errors +
3979 nstats->rx_frame_errors +
3980 nstats->rx_fifo_errors +
3981 nstats->rx_missed_errors;
a2fbb9ea 3982
bb2a0f7a 3983 nstats->tx_aborted_errors =
3984 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3985 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3986 nstats->tx_carrier_errors =
3987 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3988 nstats->tx_fifo_errors = 0;
3989 nstats->tx_heartbeat_errors = 0;
3990 nstats->tx_window_errors = 0;
3991
3992 nstats->tx_errors = nstats->tx_aborted_errors +
3993 nstats->tx_carrier_errors +
3994 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3995}
3996
3997static void bnx2x_drv_stats_update(struct bnx2x *bp)
3998{
3999 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4000 int i;
4001
4002 estats->driver_xoff = 0;
4003 estats->rx_err_discard_pkt = 0;
4004 estats->rx_skb_alloc_failed = 0;
4005 estats->hw_csum_err = 0;
4006 for_each_queue(bp, i) {
4007 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4008
4009 estats->driver_xoff += qstats->driver_xoff;
4010 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4011 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4012 estats->hw_csum_err += qstats->hw_csum_err;
4013 }
4014}
4015
bb2a0f7a 4016static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4017{
bb2a0f7a 4018 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4019
4020 if (*stats_comp != DMAE_COMP_VAL)
4021 return;
4022
4023 if (bp->port.pmf)
de832a55 4024 bnx2x_hw_stats_update(bp);
a2fbb9ea 4025
4026 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4027 BNX2X_ERR("storm stats were not updated for 3 times\n");
4028 bnx2x_panic();
4029 return;
4030 }
4031
4032 bnx2x_net_stats_update(bp);
4033 bnx2x_drv_stats_update(bp);
4034
a2fbb9ea 4035 if (bp->msglevel & NETIF_MSG_TIMER) {
4036 struct tstorm_per_client_stats *old_tclient =
4037 &bp->fp->old_tclient;
4038 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4039 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4040 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4041 int i;
4042
4043 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4044 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4045 " tx pkt (%lx)\n",
4046 bnx2x_tx_avail(bp->fp),
7a9b2557 4047 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4048 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4049 " rx pkt (%lx)\n",
4050 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4051 bp->fp->rx_comp_cons),
4052 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4053 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4054 "brb truncate %u\n",
4055 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4056 qstats->driver_xoff,
4057 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4058 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4059 "packets_too_big_discard %lu no_buff_discard %lu "
4060 "mac_discard %u mac_filter_discard %u "
4061 "xxovrflow_discard %u brb_truncate_discard %u "
4062 "ttl0_discard %u\n",
4781bfad 4063 le32_to_cpu(old_tclient->checksum_discard),
4064 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4065 bnx2x_hilo(&qstats->no_buff_discard_hi),
4066 estats->mac_discard, estats->mac_filter_discard,
4067 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4068 le32_to_cpu(old_tclient->ttl0_discard));
4069
4070 for_each_queue(bp, i) {
4071 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4072 bnx2x_fp(bp, i, tx_pkt),
4073 bnx2x_fp(bp, i, rx_pkt),
4074 bnx2x_fp(bp, i, rx_calls));
4075 }
4076 }
4077
4078 bnx2x_hw_stats_post(bp);
4079 bnx2x_storm_stats_post(bp);
4080}
a2fbb9ea 4081
4082static void bnx2x_port_stats_stop(struct bnx2x *bp)
4083{
4084 struct dmae_command *dmae;
4085 u32 opcode;
4086 int loader_idx = PMF_DMAE_C(bp);
4087 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4088
bb2a0f7a 4089 bp->executer_idx = 0;
a2fbb9ea 4090
4091 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4092 DMAE_CMD_C_ENABLE |
4093 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4094#ifdef __BIG_ENDIAN
bb2a0f7a 4095 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4096#else
bb2a0f7a 4097 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4098#endif
4099 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4100 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4101
4102 if (bp->port.port_stx) {
4103
4104 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4105 if (bp->func_stx)
4106 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4107 else
4108 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4109 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4110 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4111 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4112 dmae->dst_addr_hi = 0;
4113 dmae->len = sizeof(struct host_port_stats) >> 2;
4114 if (bp->func_stx) {
4115 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4116 dmae->comp_addr_hi = 0;
4117 dmae->comp_val = 1;
4118 } else {
4119 dmae->comp_addr_lo =
4120 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4121 dmae->comp_addr_hi =
4122 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4123 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4124
4125 *stats_comp = 0;
4126 }
4127 }
4128
4129 if (bp->func_stx) {
4130
4131 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4132 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4133 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4134 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4135 dmae->dst_addr_lo = bp->func_stx >> 2;
4136 dmae->dst_addr_hi = 0;
4137 dmae->len = sizeof(struct host_func_stats) >> 2;
4138 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4139 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4140 dmae->comp_val = DMAE_COMP_VAL;
4141
4142 *stats_comp = 0;
a2fbb9ea 4143 }
4144}
4145
4146static void bnx2x_stats_stop(struct bnx2x *bp)
4147{
4148 int update = 0;
4149
4150 bnx2x_stats_comp(bp);
4151
4152 if (bp->port.pmf)
4153 update = (bnx2x_hw_stats_update(bp) == 0);
4154
4155 update |= (bnx2x_storm_stats_update(bp) == 0);
4156
4157 if (update) {
4158 bnx2x_net_stats_update(bp);
a2fbb9ea 4159
4160 if (bp->port.pmf)
4161 bnx2x_port_stats_stop(bp);
4162
4163 bnx2x_hw_stats_post(bp);
4164 bnx2x_stats_comp(bp);
4165 }
4166}
4167
4168static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4169{
4170}
4171
4172static const struct {
4173 void (*action)(struct bnx2x *bp);
4174 enum bnx2x_stats_state next_state;
4175} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4176/* state event */
4177{
4178/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4179/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4180/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4181/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4182},
4183{
4184/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4185/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4186/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4187/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4188}
4189};
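/* Annotation: the table above is indexed [current state][event]; for
 * example, STATS_EVENT_LINK_UP while DISABLED runs bnx2x_stats_start()
 * and moves the machine to ENABLED, while UPDATE or STOP in DISABLED
 * are no-ops. bnx2x_stats_handle() below performs the lookup and
 * records the next state.
 */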
4190
4191static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4192{
4193 enum bnx2x_stats_state state = bp->stats_state;
4194
4195 bnx2x_stats_stm[state][event].action(bp);
4196 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4197
4198 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4199 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4200 state, event, bp->stats_state);
4201}
4202
4203static void bnx2x_timer(unsigned long data)
4204{
4205 struct bnx2x *bp = (struct bnx2x *) data;
4206
4207 if (!netif_running(bp->dev))
4208 return;
4209
4210 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4211 goto timer_restart;
4212
4213 if (poll) {
4214 struct bnx2x_fastpath *fp = &bp->fp[0];
4215 int rc;
4216
7961f791 4217 bnx2x_tx_int(fp);
4218 rc = bnx2x_rx_int(fp, 1000);
4219 }
4220
4221 if (!BP_NOMCP(bp)) {
4222 int func = BP_FUNC(bp);
4223 u32 drv_pulse;
4224 u32 mcp_pulse;
4225
4226 ++bp->fw_drv_pulse_wr_seq;
4227 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4228 /* TBD - add SYSTEM_TIME */
4229 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4230 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4231
34f80b04 4232 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4233 MCP_PULSE_SEQ_MASK);
4234 /* The delta between driver pulse and mcp response
4235 * should be 1 (before mcp response) or 0 (after mcp response)
4236 */
4237 if ((drv_pulse != mcp_pulse) &&
4238 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4239 /* someone lost a heartbeat... */
4240 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4241 drv_pulse, mcp_pulse);
4242 }
4243 }
4244
4245 if ((bp->state == BNX2X_STATE_OPEN) ||
4246 (bp->state == BNX2X_STATE_DISABLED))
4247 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4248
f1410647 4249timer_restart:
4250 mod_timer(&bp->timer, jiffies + bp->current_interval);
4251}
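/* Annotation on the pulse check in bnx2x_timer() above: the driver
 * bumps its sequence once per tick, so right after the SHMEM write
 * the delta to the MCP's echoed sequence is 1, dropping to 0 once
 * management firmware responds; any other delta (modulo the sequence
 * mask) means one side missed a heartbeat and gets logged.
 */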
4252
4253/* end of Statistics */
4254
4255/* nic init */
4256
4257/*
4258 * nic init service functions
4259 */
4260
34f80b04 4261static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4262{
4263 int port = BP_PORT(bp);
4264
490c3c9b 4265 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
34f80b04 4266 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4267 sizeof(struct ustorm_status_block)/4);
490c3c9b 4268 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
34f80b04 4269 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4270 sizeof(struct cstorm_status_block)/4);
4271}
4272
4273static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4274 dma_addr_t mapping, int sb_id)
4275{
4276 int port = BP_PORT(bp);
bb2a0f7a 4277 int func = BP_FUNC(bp);
a2fbb9ea 4278 int index;
34f80b04 4279 u64 section;
4280
4281 /* USTORM */
4282 section = ((u64)mapping) + offsetof(struct host_status_block,
4283 u_status_block);
34f80b04 4284 sb->u_status_block.status_block_id = sb_id;
4285
4286 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4287 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4288 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4289 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4290 U64_HI(section));
4291 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4292 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4293
4294 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4295 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4296 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4297
4298 /* CSTORM */
4299 section = ((u64)mapping) + offsetof(struct host_status_block,
4300 c_status_block);
34f80b04 4301 sb->c_status_block.status_block_id = sb_id;
4302
4303 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4304 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4305 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4306 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4307 U64_HI(section));
4308 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4309 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4310
4311 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4312 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4313 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4314
4315 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4316}
4317
4318static void bnx2x_zero_def_sb(struct bnx2x *bp)
4319{
4320 int func = BP_FUNC(bp);
a2fbb9ea 4321
4322 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4323 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4324 sizeof(struct tstorm_def_status_block)/4);
4325 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4326 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4327 sizeof(struct ustorm_def_status_block)/4);
490c3c9b 4328 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4329 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4330 sizeof(struct cstorm_def_status_block)/4);
490c3c9b 4331 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4332 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4333 sizeof(struct xstorm_def_status_block)/4);
4334}
4335
4336static void bnx2x_init_def_sb(struct bnx2x *bp,
4337 struct host_def_status_block *def_sb,
34f80b04 4338 dma_addr_t mapping, int sb_id)
a2fbb9ea 4339{
4340 int port = BP_PORT(bp);
4341 int func = BP_FUNC(bp);
4342 int index, val, reg_offset;
4343 u64 section;
4344
4345 /* ATTN */
4346 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4347 atten_status_block);
34f80b04 4348 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4349
4350 bp->attn_state = 0;
4351
4352 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4353 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4354
34f80b04 4355 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4356 bp->attn_group[index].sig[0] = REG_RD(bp,
4357 reg_offset + 0x10*index);
4358 bp->attn_group[index].sig[1] = REG_RD(bp,
4359 reg_offset + 0x4 + 0x10*index);
4360 bp->attn_group[index].sig[2] = REG_RD(bp,
4361 reg_offset + 0x8 + 0x10*index);
4362 bp->attn_group[index].sig[3] = REG_RD(bp,
4363 reg_offset + 0xc + 0x10*index);
4364 }
4365
4366 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4367 HC_REG_ATTN_MSG0_ADDR_L);
4368
4369 REG_WR(bp, reg_offset, U64_LO(section));
4370 REG_WR(bp, reg_offset + 4, U64_HI(section));
4371
4372 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4373
4374 val = REG_RD(bp, reg_offset);
34f80b04 4375 val |= sb_id;
4376 REG_WR(bp, reg_offset, val);
4377
4378 /* USTORM */
4379 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4380 u_def_status_block);
34f80b04 4381 def_sb->u_def_status_block.status_block_id = sb_id;
4382
4383 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4384 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4385 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4386 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4387 U64_HI(section));
5c862848 4388 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4389 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4390
4391 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4392 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4393 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4394
4395 /* CSTORM */
4396 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4397 c_def_status_block);
34f80b04 4398 def_sb->c_def_status_block.status_block_id = sb_id;
4399
4400 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4401 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4402 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4403 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4404 U64_HI(section));
5c862848 4405 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4406 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4407
4408 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4409 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4410 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4411
4412 /* TSTORM */
4413 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4414 t_def_status_block);
34f80b04 4415 def_sb->t_def_status_block.status_block_id = sb_id;
4416
4417 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4418 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4419 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4420 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4421 U64_HI(section));
5c862848 4422 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4423 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4424
4425 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4426 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4427 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4428
4429 /* XSTORM */
4430 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4431 x_def_status_block);
34f80b04 4432 def_sb->x_def_status_block.status_block_id = sb_id;
4433
4434 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4435 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4436 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4437 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4438 U64_HI(section));
5c862848 4439 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4440 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4441
4442 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4443 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4444 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4445
bb2a0f7a 4446 bp->stats_pending = 0;
66e855f3 4447 bp->set_mac_pending = 0;
bb2a0f7a 4448
34f80b04 4449 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4450}
4451
4452static void bnx2x_update_coalesce(struct bnx2x *bp)
4453{
34f80b04 4454 int port = BP_PORT(bp);
4455 int i;
4456
4457 for_each_queue(bp, i) {
34f80b04 4458 int sb_id = bp->fp[i].sb_id;
4459
4460 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4461 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4462 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4463 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4464 bp->rx_ticks/12);
a2fbb9ea 4465 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4466 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4467 U_SB_ETH_RX_CQ_INDEX),
3799cf47 4468 (bp->rx_ticks/12) ? 0 : 1);
4469
4470 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4471 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4472 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4473 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4474 bp->tx_ticks/12);
a2fbb9ea 4475 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4476 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4477 C_SB_ETH_TX_CQ_INDEX),
3799cf47 4478 (bp->tx_ticks/12) ? 0 : 1);
4479 }
4480}
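/* Annotation: bnx2x_update_coalesce() above programs each status-block
 * index with a timeout derived from the user's microsecond setting
 * (the /12 presumably matches the host coalescing timer resolution),
 * and uses the companion *_HC_DISABLE register to switch coalescing
 * off entirely whenever the resulting tick count is 0.
 */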
4481
4482static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4483 struct bnx2x_fastpath *fp, int last)
4484{
4485 int i;
4486
4487 for (i = 0; i < last; i++) {
4488 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4489 struct sk_buff *skb = rx_buf->skb;
4490
4491 if (skb == NULL) {
4492 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4493 continue;
4494 }
4495
4496 if (fp->tpa_state[i] == BNX2X_TPA_START)
4497 pci_unmap_single(bp->pdev,
4498 pci_unmap_addr(rx_buf, mapping),
356e2385 4499 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4500
4501 dev_kfree_skb(skb);
4502 rx_buf->skb = NULL;
4503 }
4504}
4505
4506static void bnx2x_init_rx_rings(struct bnx2x *bp)
4507{
7a9b2557 4508 int func = BP_FUNC(bp);
4509 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4510 ETH_MAX_AGGREGATION_QUEUES_E1H;
4511 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4512 int i, j;
a2fbb9ea 4513
87942b46 4514 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4515 DP(NETIF_MSG_IFUP,
4516 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4517
7a9b2557 4518 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4519
555f6c78 4520 for_each_rx_queue(bp, j) {
32626230 4521 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4522
32626230 4523 for (i = 0; i < max_agg_queues; i++) {
4524 fp->tpa_pool[i].skb =
4525 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4526 if (!fp->tpa_pool[i].skb) {
4527 BNX2X_ERR("Failed to allocate TPA "
4528 "skb pool for queue[%d] - "
4529 "disabling TPA on this "
4530 "queue!\n", j);
4531 bnx2x_free_tpa_pool(bp, fp, i);
4532 fp->disable_tpa = 1;
4533 break;
4534 }
4535 pci_unmap_addr_set((struct sw_rx_bd *)
4536 &bp->fp->tpa_pool[i],
4537 mapping, 0);
4538 fp->tpa_state[i] = BNX2X_TPA_STOP;
4539 }
4540 }
4541 }
4542
555f6c78 4543 for_each_rx_queue(bp, j) {
4544 struct bnx2x_fastpath *fp = &bp->fp[j];
4545
4546 fp->rx_bd_cons = 0;
4547 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4548 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4549
4550 /* "next page" elements initialization */
4551 /* SGE ring */
4552 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4553 struct eth_rx_sge *sge;
4554
4555 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4556 sge->addr_hi =
4557 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4558 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4559 sge->addr_lo =
4560 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4561 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4562 }
4563
4564 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4565
7a9b2557 4566 /* RX BD ring */
4567 for (i = 1; i <= NUM_RX_RINGS; i++) {
4568 struct eth_rx_bd *rx_bd;
4569
4570 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4571 rx_bd->addr_hi =
4572 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4573 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4574 rx_bd->addr_lo =
4575 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4576 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4577 }
4578
34f80b04 4579 /* CQ ring */
4580 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4581 struct eth_rx_cqe_next_page *nextpg;
4582
4583 nextpg = (struct eth_rx_cqe_next_page *)
4584 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4585 nextpg->addr_hi =
4586 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4587 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4588 nextpg->addr_lo =
4589 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4590 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4591 }
4592
4593 /* Allocate SGEs and initialize the ring elements */
4594 for (i = 0, ring_prod = 0;
4595 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4596
4597 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4598 BNX2X_ERR("was only able to allocate "
4599 "%d rx sges\n", i);
4600 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4601 /* Cleanup already allocated elements */
4602 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4603 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4604 fp->disable_tpa = 1;
4605 ring_prod = 0;
4606 break;
4607 }
4608 ring_prod = NEXT_SGE_IDX(ring_prod);
4609 }
4610 fp->rx_sge_prod = ring_prod;
4611
4612 /* Allocate BDs and initialize BD ring */
66e855f3 4613 fp->rx_comp_cons = 0;
7a9b2557 4614 cqe_ring_prod = ring_prod = 0;
4615 for (i = 0; i < bp->rx_ring_size; i++) {
4616 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4617 BNX2X_ERR("was only able to allocate "
4618 "%d rx skbs on queue[%d]\n", i, j);
4619 fp->eth_q_stats.rx_skb_alloc_failed++;
4620 break;
4621 }
4622 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4623 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4624 WARN_ON(ring_prod <= i);
4625 }
4626
4627 fp->rx_bd_prod = ring_prod;
4628 /* must not have more available CQEs than BDs */
4629 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4630 cqe_ring_prod);
4631 fp->rx_pkt = fp->rx_calls = 0;
4632
4633 /* Warning!
4634 * this will generate an interrupt (to the TSTORM)
4635 * must only be done after chip is initialized
4636 */
4637 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4638 fp->rx_sge_prod);
4639 if (j != 0)
4640 continue;
4641
4642 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4643 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4644 U64_LO(fp->rx_comp_mapping));
4645 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4646 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4647 U64_HI(fp->rx_comp_mapping));
4648 }
4649}
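/* Annotation: the RX rings built above are circles of page-sized
 * chunks - the last two entries of each BD/SGE page (RX_DESC_CNT * i
 * - 2) and the last entry of each CQ page are "next page" elements
 * holding the DMA address of the following page rather than a buffer,
 * which is presumably why the NEXT_*_IDX() macros skip over them when
 * producers and consumers advance.
 */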
4650
4651static void bnx2x_init_tx_ring(struct bnx2x *bp)
4652{
4653 int i, j;
4654
555f6c78 4655 for_each_tx_queue(bp, j) {
4656 struct bnx2x_fastpath *fp = &bp->fp[j];
4657
4658 for (i = 1; i <= NUM_TX_RINGS; i++) {
4659 struct eth_tx_bd *tx_bd =
4660 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4661
4662 tx_bd->addr_hi =
4663 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4664 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4665 tx_bd->addr_lo =
4666 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4667 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4668 }
4669
4670 fp->tx_pkt_prod = 0;
4671 fp->tx_pkt_cons = 0;
4672 fp->tx_bd_prod = 0;
4673 fp->tx_bd_cons = 0;
4674 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4675 fp->tx_pkt = 0;
4676 }
4677}
4678
4679static void bnx2x_init_sp_ring(struct bnx2x *bp)
4680{
34f80b04 4681 int func = BP_FUNC(bp);
4682
4683 spin_lock_init(&bp->spq_lock);
4684
4685 bp->spq_left = MAX_SPQ_PENDING;
4686 bp->spq_prod_idx = 0;
4687 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4688 bp->spq_prod_bd = bp->spq;
4689 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4690
34f80b04 4691 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4692 U64_LO(bp->spq_mapping));
4693 REG_WR(bp,
4694 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4695 U64_HI(bp->spq_mapping));
4696
34f80b04 4697 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4698 bp->spq_prod_idx);
4699}
4700
4701static void bnx2x_init_context(struct bnx2x *bp)
4702{
4703 int i;
4704
4705 for_each_queue(bp, i) {
4706 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4707 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 4708 u8 cl_id = fp->cl_id;
0626b899 4709 u8 sb_id = fp->sb_id;
a2fbb9ea 4710
4711 context->ustorm_st_context.common.sb_index_numbers =
4712 BNX2X_RX_SB_INDEX_NUM;
0626b899 4713 context->ustorm_st_context.common.clientId = cl_id;
4714 context->ustorm_st_context.common.status_block_id = sb_id;
4715 context->ustorm_st_context.common.flags =
4716 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4717 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4718 context->ustorm_st_context.common.statistics_counter_id =
4719 cl_id;
8d9c5f34 4720 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 4721 BNX2X_RX_ALIGN_SHIFT;
34f80b04 4722 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4723 bp->rx_buf_size;
34f80b04 4724 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4725 U64_HI(fp->rx_desc_mapping);
34f80b04 4726 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4727 U64_LO(fp->rx_desc_mapping);
4728 if (!fp->disable_tpa) {
4729 context->ustorm_st_context.common.flags |=
4730 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4731 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4732 context->ustorm_st_context.common.sge_buff_size =
4733 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4734 (u32)0xffff);
4735 context->ustorm_st_context.common.sge_page_base_hi =
4736 U64_HI(fp->rx_sge_mapping);
4737 context->ustorm_st_context.common.sge_page_base_lo =
4738 U64_LO(fp->rx_sge_mapping);
4739 }
4740
4741 context->ustorm_ag_context.cdu_usage =
4742 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4743 CDU_REGION_NUMBER_UCM_AG,
4744 ETH_CONNECTION_TYPE);
4745
4746 context->xstorm_st_context.tx_bd_page_base_hi =
4747 U64_HI(fp->tx_desc_mapping);
4748 context->xstorm_st_context.tx_bd_page_base_lo =
4749 U64_LO(fp->tx_desc_mapping);
4750 context->xstorm_st_context.db_data_addr_hi =
4751 U64_HI(fp->tx_prods_mapping);
4752 context->xstorm_st_context.db_data_addr_lo =
4753 U64_LO(fp->tx_prods_mapping);
0626b899 4754 context->xstorm_st_context.statistics_data = (cl_id |
8d9c5f34 4755 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea 4756 context->cstorm_st_context.sb_index_number =
5c862848 4757 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4758 context->cstorm_st_context.status_block_id = sb_id;
4759
4760 context->xstorm_ag_context.cdu_reserved =
4761 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4762 CDU_REGION_NUMBER_XCM_AG,
4763 ETH_CONNECTION_TYPE);
4764 }
4765}
4766
4767static void bnx2x_init_ind_table(struct bnx2x *bp)
4768{
26c8fa4d 4769 int func = BP_FUNC(bp);
4770 int i;
4771
555f6c78 4772 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4773 return;
4774
4775 DP(NETIF_MSG_IFUP,
4776 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4777 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4778 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4779 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
0626b899 4780 bp->fp->cl_id + (i % bp->num_rx_queues));
4781}
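/* Annotation: the loop above stripes the TSTORM_INDIRECTION_TABLE_SIZE
 * RSS slots round-robin over the RX queues via
 * bp->fp->cl_id + (i % bp->num_rx_queues), so hash buckets spread
 * evenly across queues; with ETH_RSS_MODE_DISABLED the table is left
 * at its defaults and indirection is not used.
 */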
4782
4783static void bnx2x_set_client_config(struct bnx2x *bp)
4784{
49d66772 4785 struct tstorm_eth_client_config tstorm_client = {0};
4786 int port = BP_PORT(bp);
4787 int i;
49d66772 4788
e7799c5f 4789 tstorm_client.mtu = bp->dev->mtu;
49d66772 4790 tstorm_client.config_flags =
4791 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4792 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 4793#ifdef BCM_VLAN
0c6671b0 4794 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 4795 tstorm_client.config_flags |=
8d9c5f34 4796 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4797 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4798 }
4799#endif
49d66772 4800
4801 if (bp->flags & TPA_ENABLE_FLAG) {
4802 tstorm_client.max_sges_for_packet =
4f40f2cb 4803 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4804 tstorm_client.max_sges_for_packet =
4805 ((tstorm_client.max_sges_for_packet +
4806 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4807 PAGES_PER_SGE_SHIFT;
4808
4809 tstorm_client.config_flags |=
4810 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4811 }
4812
49d66772 4813 for_each_queue(bp, i) {
4814 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4815
49d66772 4816 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4817 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4818 ((u32 *)&tstorm_client)[0]);
4819 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4820 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4821 ((u32 *)&tstorm_client)[1]);
4822 }
4823
4824 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4825 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4826}
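/* Annotation, worked example for max_sges_for_packet above (assuming
 * a 4K SGE page and mtu 9000): SGE_PAGE_ALIGN(9000) >> SGE_PAGE_SHIFT
 * yields 3 pages, which the second step rounds up to a multiple of
 * PAGES_PER_SGE and converts to SGE elements - an upper bound on how
 * many SGEs a single aggregated packet may span.
 */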
4827
4828static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4829{
a2fbb9ea 4830 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4831 int mode = bp->rx_mode;
4832 int mask = (1 << BP_L_ID(bp));
4833 int func = BP_FUNC(bp);
581ce43d 4834 int port = BP_PORT(bp);
a2fbb9ea 4835 int i;
4836 /* All but management unicast packets should pass to the host as well */
4837 u32 llh_mask =
4838 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4839 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4840 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4841 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 4842
3196a88a 4843 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4844
4845 switch (mode) {
4846 case BNX2X_RX_MODE_NONE: /* no Rx */
4847 tstorm_mac_filter.ucast_drop_all = mask;
4848 tstorm_mac_filter.mcast_drop_all = mask;
4849 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 4850 break;
356e2385 4851
a2fbb9ea 4852 case BNX2X_RX_MODE_NORMAL:
34f80b04 4853 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4854 break;
356e2385 4855
a2fbb9ea 4856 case BNX2X_RX_MODE_ALLMULTI:
4857 tstorm_mac_filter.mcast_accept_all = mask;
4858 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4859 break;
356e2385 4860
a2fbb9ea 4861 case BNX2X_RX_MODE_PROMISC:
4862 tstorm_mac_filter.ucast_accept_all = mask;
4863 tstorm_mac_filter.mcast_accept_all = mask;
4864 tstorm_mac_filter.bcast_accept_all = mask;
4865 /* pass management unicast packets as well */
4866 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 4867 break;
356e2385 4868
a2fbb9ea 4869 default:
4870 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4871 break;
4872 }
4873
4874 REG_WR(bp,
4875 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
4876 llh_mask);
4877
4878 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4879 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4880 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4881 ((u32 *)&tstorm_mac_filter)[i]);
4882
34f80b04 4883/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4884 ((u32 *)&tstorm_mac_filter)[i]); */
4885 }
a2fbb9ea 4886
4887 if (mode != BNX2X_RX_MODE_NONE)
4888 bnx2x_set_client_config(bp);
4889}
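/* Annotation: the switch above reduces to a small per-mode filter
 * matrix - NONE drops ucast/mcast/bcast outright, NORMAL accepts only
 * bcast (unicast/multicast presumably match through the configured
 * CAM entries), ALLMULTI additionally accepts all mcast, and PROMISC
 * accepts everything, also opening the NIG llh_mask for management
 * unicast as noted in that case.
 */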
4890
4891static void bnx2x_init_internal_common(struct bnx2x *bp)
4892{
4893 int i;
4894
4895 if (bp->flags & TPA_ENABLE_FLAG) {
4896 struct tstorm_eth_tpa_exist tpa = {0};
4897
4898 tpa.tpa_exist = 1;
4899
4900 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4901 ((u32 *)&tpa)[0]);
4902 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4903 ((u32 *)&tpa)[1]);
4904 }
4905
4906 /* Zero this manually as its initialization is
4907 currently missing in the initTool */
4908 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4909 REG_WR(bp, BAR_USTRORM_INTMEM +
4910 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4911}
4912
4913static void bnx2x_init_internal_port(struct bnx2x *bp)
4914{
4915 int port = BP_PORT(bp);
4916
4917 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4918 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4919 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4920 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4921}
4922
4923/* Calculates the sum of vn_min_rates.
4924 It's needed for further normalizing of the min_rates.
4925 Returns:
4926 sum of vn_min_rates.
4927 or
4928 0 - if all the min_rates are 0.
4929 In the latter case the fairness algorithm should be deactivated.
4930 If not all min_rates are zero then those that are zeroes will be set to 1.
4931 */
4932static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4933{
4934 int all_zero = 1;
4935 int port = BP_PORT(bp);
4936 int vn;
4937
4938 bp->vn_weight_sum = 0;
4939 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4940 int func = 2*vn + port;
4941 u32 vn_cfg =
4942 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4943 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4944 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4945
4946 /* Skip hidden vns */
4947 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4948 continue;
4949
4950 /* If min rate is zero - set it to 1 */
4951 if (!vn_min_rate)
4952 vn_min_rate = DEF_MIN_RATE;
4953 else
4954 all_zero = 0;
4955
4956 bp->vn_weight_sum += vn_min_rate;
4957 }
4958
4959 /* disable fairness only if all the min rates are zero */
4960 if (all_zero)
4961 bp->vn_weight_sum = 0;
4962}
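
The weight-sum rule is easy to verify in isolation: a zero min rate is promoted to a default so the VN still gets a share, and fairness is disabled only when every configured rate was zero. A hedged standalone sketch of that logic (hidden-VN skipping omitted, DEF_MIN_RATE value assumed):

#include <stdio.h>

#define DEF_MIN_RATE 1    /* assumed default; the driver defines its own */

/* Returns the weight sum, or 0 if fairness should be disabled. */
static unsigned int calc_weight_sum(const unsigned int *rates, int n)
{
    unsigned int sum = 0;
    int all_zero = 1, i;

    for (i = 0; i < n; i++) {
        if (rates[i])
            all_zero = 0;
        sum += rates[i] ? rates[i] : DEF_MIN_RATE;
    }
    return all_zero ? 0 : sum;
}

int main(void)
{
    unsigned int cfg[4] = { 0, 2500, 0, 7500 };

    printf("weight sum = %u\n", calc_weight_sum(cfg, 4));
    return 0;
}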
4963
471de716 4964static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4965{
a2fbb9ea
ET
4966 struct tstorm_eth_function_common_config tstorm_config = {0};
4967 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4968 int port = BP_PORT(bp);
4969 int func = BP_FUNC(bp);
de832a55
EG
4970 int i, j;
4971 u32 offset;
471de716 4972 u16 max_agg_size;
a2fbb9ea
ET
4973
4974 if (is_multi(bp)) {
555f6c78 4975 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4976 tstorm_config.rss_result_mask = MULTI_MASK;
4977 }
8d9c5f34
EG
4978 if (IS_E1HMF(bp))
4979 tstorm_config.config_flags |=
4980 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4981
34f80b04
EG
4982 tstorm_config.leading_client_id = BP_L_ID(bp);
4983
a2fbb9ea 4984 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4985 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4986 (*(u32 *)&tstorm_config));
4987
c14423fe 4988 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4989 bnx2x_set_storm_rx_mode(bp);
4990
de832a55
EG
4991 for_each_queue(bp, i) {
4992 u8 cl_id = bp->fp[i].cl_id;
4993
4994 /* reset xstorm per client statistics */
4995 offset = BAR_XSTRORM_INTMEM +
4996 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4997 for (j = 0;
4998 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4999 REG_WR(bp, offset + j*4, 0);
5000
5001 /* reset tstorm per client statistics */
5002 offset = BAR_TSTRORM_INTMEM +
5003 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5004 for (j = 0;
5005 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5006 REG_WR(bp, offset + j*4, 0);
5007
5008 /* reset ustorm per client statistics */
5009 offset = BAR_USTRORM_INTMEM +
5010 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5011 for (j = 0;
5012 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5013 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
5014 }
5015
5016 /* Init statistics related context */
34f80b04 5017 stats_flags.collect_eth = 1;
a2fbb9ea 5018
66e855f3 5019 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5020 ((u32 *)&stats_flags)[0]);
66e855f3 5021 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5022 ((u32 *)&stats_flags)[1]);
5023
66e855f3 5024 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5025 ((u32 *)&stats_flags)[0]);
66e855f3 5026 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5027 ((u32 *)&stats_flags)[1]);
5028
de832a55
EG
5029 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5030 ((u32 *)&stats_flags)[0]);
5031 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5032 ((u32 *)&stats_flags)[1]);
5033
66e855f3 5034 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5035 ((u32 *)&stats_flags)[0]);
66e855f3 5036 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5037 ((u32 *)&stats_flags)[1]);
5038
66e855f3
YG
5039 REG_WR(bp, BAR_XSTRORM_INTMEM +
5040 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5041 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5042 REG_WR(bp, BAR_XSTRORM_INTMEM +
5043 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5044 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5045
5046 REG_WR(bp, BAR_TSTRORM_INTMEM +
5047 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5048 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5049 REG_WR(bp, BAR_TSTRORM_INTMEM +
5050 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5051 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5052
de832a55
EG
5053 REG_WR(bp, BAR_USTRORM_INTMEM +
5054 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5055 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5056 REG_WR(bp, BAR_USTRORM_INTMEM +
5057 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5058 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5059
34f80b04
EG
5060 if (CHIP_IS_E1H(bp)) {
5061 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5062 IS_E1HMF(bp));
5063 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5064 IS_E1HMF(bp));
5065 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5066 IS_E1HMF(bp));
5067 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5068 IS_E1HMF(bp));
5069
7a9b2557
VZ
5070 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5071 bp->e1hov);
34f80b04
EG
5072 }
5073
4f40f2cb
EG
5074 /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5075 max_agg_size =
5076 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5077 SGE_PAGE_SIZE * PAGES_PER_SGE),
5078 (u32)0xffff);
555f6c78 5079 for_each_rx_queue(bp, i) {
7a9b2557 5080 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5081
5082 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5083 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5084 U64_LO(fp->rx_comp_mapping));
5085 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5086 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5087 U64_HI(fp->rx_comp_mapping));
5088
7a9b2557 5089 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5090 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5091 max_agg_size);
5092 }
8a1c38d1 5093
1c06328c
EG
5094 /* dropless flow control */
5095 if (CHIP_IS_E1H(bp)) {
5096 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5097
5098 rx_pause.bd_thr_low = 250;
5099 rx_pause.cqe_thr_low = 250;
5100 rx_pause.cos = 1;
5101 rx_pause.sge_thr_low = 0;
5102 rx_pause.bd_thr_high = 350;
5103 rx_pause.cqe_thr_high = 350;
5104 rx_pause.sge_thr_high = 0;
5105
5106 for_each_rx_queue(bp, i) {
5107 struct bnx2x_fastpath *fp = &bp->fp[i];
5108
5109 if (!fp->disable_tpa) {
5110 rx_pause.sge_thr_low = 150;
5111 rx_pause.sge_thr_high = 250;
5112 }
5113
5114
5115 offset = BAR_USTRORM_INTMEM +
5116 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5117 fp->cl_id);
5118 for (j = 0;
5119 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5120 j++)
5121 REG_WR(bp, offset + j*4,
5122 ((u32 *)&rx_pause)[j]);
5123 }
5124 }
5125
8a1c38d1
EG
5126 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5127
5128 /* Init rate shaping and fairness contexts */
5129 if (IS_E1HMF(bp)) {
5130 int vn;
5131
5132 /* During init there is no active link;
5133 until link is up, set the link rate to 10Gbps */
5134 bp->link_vars.line_speed = SPEED_10000;
5135 bnx2x_init_port_minmax(bp);
5136
5137 bnx2x_calc_vn_weight_sum(bp);
5138
5139 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5140 bnx2x_init_vn_minmax(bp, 2*vn + port);
5141
5142 /* Enable rate shaping and fairness */
5143 bp->cmng.flags.cmng_enables =
5144 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5145 if (bp->vn_weight_sum)
5146 bp->cmng.flags.cmng_enables |=
5147 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5148 else
5149 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
5150 " fairness will be disabled\n");
5151 } else {
5152 /* rate shaping and fairness are disabled */
5153 DP(NETIF_MSG_IFUP,
5154 "single function mode minmax will be disabled\n");
5155 }
5156
5157
5158 /* Store it to internal memory */
5159 if (bp->port.pmf)
5160 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5161 REG_WR(bp, BAR_XSTRORM_INTMEM +
5162 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5163 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5164}
5165
471de716
EG
5166static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5167{
5168 switch (load_code) {
5169 case FW_MSG_CODE_DRV_LOAD_COMMON:
5170 bnx2x_init_internal_common(bp);
5171 /* no break */
5172
5173 case FW_MSG_CODE_DRV_LOAD_PORT:
5174 bnx2x_init_internal_port(bp);
5175 /* no break */
5176
5177 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5178 bnx2x_init_internal_func(bp);
5179 break;
5180
5181 default:
5182 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5183 break;
5184 }
5185}
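
The missing breaks above are deliberate: a COMMON load also runs the PORT and FUNCTION stages, a PORT load runs PORT and FUNCTION, and a FUNCTION load runs only its own stage. The same cascade, reduced to a standalone sketch:

#include <stdio.h>

enum load_code { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

static void init_cascade(enum load_code code)
{
    switch (code) {
    case LOAD_COMMON:
        printf("common init\n");
        /* fall through */
    case LOAD_PORT:
        printf("port init\n");
        /* fall through */
    case LOAD_FUNCTION:
        printf("function init\n");
        break;
    default:
        printf("unknown load code\n");
        break;
    }
}

int main(void)
{
    init_cascade(LOAD_PORT);    /* prints: port init, function init */
    return 0;
}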
5186
5187static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5188{
5189 int i;
5190
5191 for_each_queue(bp, i) {
5192 struct bnx2x_fastpath *fp = &bp->fp[i];
5193
34f80b04 5194 fp->bp = bp;
a2fbb9ea 5195 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5196 fp->index = i;
34f80b04
EG
5197 fp->cl_id = BP_L_ID(bp) + i;
5198 fp->sb_id = fp->cl_id;
5199 DP(NETIF_MSG_IFUP,
f5372251
EG
5200 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5201 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5202 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5203 fp->sb_id);
5c862848 5204 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5205 }
5206
16119785
EG
5207 /* ensure status block indices were read */
5208 rmb();
5209
5210
5c862848
EG
5211 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5212 DEF_SB_ID);
5213 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5214 bnx2x_update_coalesce(bp);
5215 bnx2x_init_rx_rings(bp);
5216 bnx2x_init_tx_ring(bp);
5217 bnx2x_init_sp_ring(bp);
5218 bnx2x_init_context(bp);
471de716 5219 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5220 bnx2x_init_ind_table(bp);
0ef00459
EG
5221 bnx2x_stats_init(bp);
5222
5223 /* At this point, we are ready for interrupts */
5224 atomic_set(&bp->intr_sem, 0);
5225
5226 /* flush all before enabling interrupts */
5227 mb();
5228 mmiowb();
5229
615f8fd9 5230 bnx2x_int_enable(bp);
eb8da205
EG
5231
5232 /* Check for SPIO5 */
5233 bnx2x_attn_int_deasserted0(bp,
5234 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5235 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
5236}
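
Note the ordering at the end of bnx2x_nic_init(): every ring and context write must be globally visible before bnx2x_int_enable() lets the hardware raise interrupts, hence the mb()/mmiowb() pair. A userspace analogue of that publish-then-enable pattern using C11 fences (an analogy only, not the kernel primitives):

#include <stdatomic.h>
#include <stdbool.h>

static int rings_ready;            /* stands in for all the init state */
static atomic_bool intr_enabled;

static void nic_init_then_enable(void)
{
    rings_ready = 1;               /* plain writes: ring/context setup */

    /* Make every prior store visible before interrupts are enabled,
     * as the driver's mb()/mmiowb() pair does before bnx2x_int_enable().
     */
    atomic_thread_fence(memory_order_release);
    atomic_store_explicit(&intr_enabled, true, memory_order_relaxed);
}

int main(void)
{
    nic_init_then_enable();
    return 0;
}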
5237
5238/* end of nic init */
5239
5240/*
5241 * gzip service functions
5242 */
5243
5244static int bnx2x_gunzip_init(struct bnx2x *bp)
5245{
5246 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5247 &bp->gunzip_mapping);
5248 if (bp->gunzip_buf == NULL)
5249 goto gunzip_nomem1;
5250
5251 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5252 if (bp->strm == NULL)
5253 goto gunzip_nomem2;
5254
5255 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5256 GFP_KERNEL);
5257 if (bp->strm->workspace == NULL)
5258 goto gunzip_nomem3;
5259
5260 return 0;
5261
5262gunzip_nomem3:
5263 kfree(bp->strm);
5264 bp->strm = NULL;
5265
5266gunzip_nomem2:
5267 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5268 bp->gunzip_mapping);
5269 bp->gunzip_buf = NULL;
5270
5271gunzip_nomem1:
5272 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5273 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
5274 return -ENOMEM;
5275}
5276
5277static void bnx2x_gunzip_end(struct bnx2x *bp)
5278{
5279 kfree(bp->strm->workspace);
5280
5281 kfree(bp->strm);
5282 bp->strm = NULL;
5283
5284 if (bp->gunzip_buf) {
5285 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5286 bp->gunzip_mapping);
5287 bp->gunzip_buf = NULL;
5288 }
5289}
5290
94a78b79 5291static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
5292{
5293 int n, rc;
5294
5295 /* check gzip header */
94a78b79
VZ
5296 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5297 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 5298 return -EINVAL;
94a78b79 5299 }
a2fbb9ea
ET
5300
5301 n = 10;
5302
34f80b04 5303#define FNAME 0x8
a2fbb9ea
ET
5304
5305 if (zbuf[3] & FNAME)
5306 while ((zbuf[n++] != 0) && (n < len));
5307
94a78b79 5308 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
5309 bp->strm->avail_in = len - n;
5310 bp->strm->next_out = bp->gunzip_buf;
5311 bp->strm->avail_out = FW_BUF_SIZE;
5312
5313 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5314 if (rc != Z_OK)
5315 return rc;
5316
5317 rc = zlib_inflate(bp->strm, Z_FINISH);
5318 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5319 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5320 bp->dev->name, bp->strm->msg);
5321
5322 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5323 if (bp->gunzip_outlen & 0x3)
5324 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5325 " gunzip_outlen (%d) not aligned\n",
5326 bp->dev->name, bp->gunzip_outlen);
5327 bp->gunzip_outlen >>= 2;
5328
5329 zlib_inflateEnd(bp->strm);
5330
5331 if (rc == Z_STREAM_END)
5332 return 0;
5333
5334 return rc;
5335}
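
bnx2x_gunzip() skips the fixed 10-byte gzip header by hand (plus the NUL-terminated name when the FNAME flag is set) and then inflates a raw deflate stream, which is why zlib_inflateInit2() gets -MAX_WBITS. A userspace sketch of the same handling with ordinary zlib (link with -lz; like the driver, it ignores the other optional gzip header fields):

#include <string.h>
#include <zlib.h>

/* Inflate a gzip buffer by hand-skipping the header, as the driver does.
 * Returns the decompressed length, or -1 on error.
 */
static int gunzip_raw(const unsigned char *zbuf, unsigned int len,
                      unsigned char *out, unsigned int outlen)
{
    z_stream strm;
    unsigned int n = 10;    /* fixed gzip header */
    int rc;

    if (len < 10 || zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 8)
        return -1;          /* bad magic or compression method */

    if (zbuf[3] & 0x08)     /* FNAME: skip the NUL-terminated name */
        while (n < len && zbuf[n++] != 0)
            ;

    memset(&strm, 0, sizeof(strm));
    strm.next_in = (unsigned char *)zbuf + n;
    strm.avail_in = len - n;
    strm.next_out = out;
    strm.avail_out = outlen;

    if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)    /* raw deflate */
        return -1;
    rc = inflate(&strm, Z_FINISH);
    inflateEnd(&strm);

    return (rc == Z_STREAM_END) ? (int)(outlen - strm.avail_out) : -1;
}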
5336
5337/* nic load/unload */
5338
5339/*
34f80b04 5340 * General service functions
a2fbb9ea
ET
5341 */
5342
5343/* send a NIG loopback debug packet */
5344static void bnx2x_lb_pckt(struct bnx2x *bp)
5345{
a2fbb9ea 5346 u32 wb_write[3];
a2fbb9ea
ET
5347
5348 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5349 wb_write[0] = 0x55555555;
5350 wb_write[1] = 0x55555555;
34f80b04 5351 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5352 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5353
5354 /* NON-IP protocol */
a2fbb9ea
ET
5355 wb_write[0] = 0x09000000;
5356 wb_write[1] = 0x55555555;
34f80b04 5357 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5358 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5359}
5360
5361/* Some of the internal memories
5362 * are not directly readable from the driver;
5363 * to test them we send debug packets.
5364 */
5365static int bnx2x_int_mem_test(struct bnx2x *bp)
5366{
5367 int factor;
5368 int count, i;
5369 u32 val = 0;
5370
ad8d3948 5371 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5372 factor = 120;
ad8d3948
EG
5373 else if (CHIP_REV_IS_EMUL(bp))
5374 factor = 200;
5375 else
a2fbb9ea 5376 factor = 1;
a2fbb9ea
ET
5377
5378 DP(NETIF_MSG_HW, "start part1\n");
5379
5380 /* Disable inputs of parser neighbor blocks */
5381 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5382 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5383 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5384 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5385
5386 /* Write 0 to parser credits for CFC search request */
5387 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5388
5389 /* send Ethernet packet */
5390 bnx2x_lb_pckt(bp);
5391
5392 /* TODO: do we need to reset the NIG statistics? */
5393 /* Wait until NIG register shows 1 packet of size 0x10 */
5394 count = 1000 * factor;
5395 while (count) {
34f80b04 5396
a2fbb9ea
ET
5397 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5398 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5399 if (val == 0x10)
5400 break;
5401
5402 msleep(10);
5403 count--;
5404 }
5405 if (val != 0x10) {
5406 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5407 return -1;
5408 }
5409
5410 /* Wait until PRS register shows 1 packet */
5411 count = 1000 * factor;
5412 while (count) {
5413 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5414 if (val == 1)
5415 break;
5416
5417 msleep(10);
5418 count--;
5419 }
5420 if (val != 0x1) {
5421 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5422 return -2;
5423 }
5424
5425 /* Reset and init BRB, PRS */
34f80b04 5426 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5427 msleep(50);
34f80b04 5428 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5429 msleep(50);
94a78b79
VZ
5430 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5431 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5432
5433 DP(NETIF_MSG_HW, "part2\n");
5434
5435 /* Disable inputs of parser neighbor blocks */
5436 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5437 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5438 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5439 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5440
5441 /* Write 0 to parser credits for CFC search request */
5442 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5443
5444 /* send 10 Ethernet packets */
5445 for (i = 0; i < 10; i++)
5446 bnx2x_lb_pckt(bp);
5447
5448 /* Wait until NIG register shows 10 + 1
5449 packets of size 11*0x10 = 0xb0 */
5450 count = 1000 * factor;
5451 while (count) {
34f80b04 5452
a2fbb9ea
ET
5453 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5454 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5455 if (val == 0xb0)
5456 break;
5457
5458 msleep(10);
5459 count--;
5460 }
5461 if (val != 0xb0) {
5462 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5463 return -3;
5464 }
5465
5466 /* Wait until PRS register shows 2 packets */
5467 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5468 if (val != 2)
5469 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5470
5471 /* Write 1 to parser credits for CFC search request */
5472 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5473
5474 /* Wait until PRS register shows 3 packets */
5475 msleep(10 * factor);
5476 /* now the PRS register should show 3 packets */
5477 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5478 if (val != 3)
5479 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5480
5481 /* clear NIG EOP FIFO */
5482 for (i = 0; i < 11; i++)
5483 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5484 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5485 if (val != 1) {
5486 BNX2X_ERR("clear of NIG failed\n");
5487 return -4;
5488 }
5489
5490 /* Reset and init BRB, PRS, NIG */
5491 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5492 msleep(50);
5493 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5494 msleep(50);
94a78b79
VZ
5495 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5496 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5497#ifndef BCM_ISCSI
5498 /* set NIC mode */
5499 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5500#endif
5501
5502 /* Enable inputs of parser neighbor blocks */
5503 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5504 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5505 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5506 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5507
5508 DP(NETIF_MSG_HW, "done\n");
5509
5510 return 0; /* OK */
5511}
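
Both halves of the self-test use the same poll-with-timeout idiom: read a counter, compare against the expected value, sleep 10ms, and give up after count iterations (count is scaled by factor on emulation/FPGA). The bare pattern, with dev_read() as a stand-in stub for REG_RD()/bnx2x_read_dmae():

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint32_t dev_read(uint32_t reg)
{
    (void)reg;
    return 0x10;    /* fake device: pretend the packet arrived */
}

/* Poll reg until it reads expect; returns 0 on success, -1 on timeout. */
static int poll_reg(uint32_t reg, uint32_t expect, int count)
{
    while (count--) {
        if (dev_read(reg) == expect)
            return 0;
        usleep(10 * 1000);    /* the driver uses msleep(10) */
    }
    return -1;
}

int main(void)
{
    printf("poll result: %d\n", poll_reg(0x1234, 0x10, 100));
    return 0;
}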
5512
5513static void enable_blocks_attention(struct bnx2x *bp)
5514{
5515 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5516 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5517 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5518 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5519 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5520 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5521 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5522 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5523 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5524/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5525/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5526 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5527 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5528 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5529/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5530/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5531 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5532 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5533 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5534 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5535/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5536/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5537 if (CHIP_REV_IS_FPGA(bp))
5538 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5539 else
5540 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5541 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5542 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5543 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5544/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5545/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5546 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5547 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5548/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5549 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5550}
5551
34f80b04 5552
81f75bbf
EG
5553static void bnx2x_reset_common(struct bnx2x *bp)
5554{
5555 /* reset_common */
5556 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5557 0xd3ffff7f);
5558 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5559}
5560
fd4ef40d
EG
5561
5562static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5563{
5564 u32 val;
5565 u8 port;
5566 u8 is_required = 0;
5567
5568 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5569 SHARED_HW_CFG_FAN_FAILURE_MASK;
5570
5571 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5572 is_required = 1;
5573
5574 /*
5575 * The fan failure mechanism is usually related to the PHY type since
5576 * the power consumption of the board is affected by the PHY. Currently,
5577 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5578 */
5579 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5580 for (port = PORT_0; port < PORT_MAX; port++) {
5581 u32 phy_type =
5582 SHMEM_RD(bp, dev_info.port_hw_config[port].
5583 external_phy_config) &
5584 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5585 is_required |=
5586 ((phy_type ==
5587 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
5588 (phy_type ==
5589 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
5590 (phy_type ==
5591 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5592 }
5593
5594 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5595
5596 if (is_required == 0)
5597 return;
5598
5599 /* Fan failure is indicated by SPIO 5 */
5600 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5601 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5602
5603 /* set to active low mode */
5604 val = REG_RD(bp, MISC_REG_SPIO_INT);
5605 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5606 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5607 REG_WR(bp, MISC_REG_SPIO_INT, val);
5608
5609 /* enable interrupt to signal the IGU */
5610 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5611 val |= (1 << MISC_REGISTERS_SPIO_5);
5612 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5613}
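
The is_required flag above is OR-accumulated across both ports, since one hot PHY anywhere on the board is enough to demand fan monitoring. The decision logic in isolation (the enum values here are illustrative, not the shared-memory encoding):

#include <stdbool.h>

enum phy_type { PHY_OTHER, PHY_SFX7101, PHY_BCM8727, PHY_BCM8481 };

/* Fan monitoring is needed if any port carries one of the hot PHYs. */
static bool fan_required(const enum phy_type *port_phy, int nports)
{
    bool required = false;
    int p;

    for (p = 0; p < nports; p++)
        required |= (port_phy[p] == PHY_SFX7101 ||
                     port_phy[p] == PHY_BCM8727 ||
                     port_phy[p] == PHY_BCM8481);
    return required;
}

int main(void)
{
    enum phy_type ports[2] = { PHY_OTHER, PHY_BCM8727 };

    return fan_required(ports, 2) ? 0 : 1;
}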
5614
34f80b04 5615static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5616{
a2fbb9ea 5617 u32 val, i;
a2fbb9ea 5618
34f80b04 5619 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5620
81f75bbf 5621 bnx2x_reset_common(bp);
34f80b04
EG
5622 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5623 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5624
94a78b79 5625 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
5626 if (CHIP_IS_E1H(bp))
5627 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5628
34f80b04
EG
5629 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5630 msleep(30);
5631 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5632
94a78b79 5633 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
5634 if (CHIP_IS_E1(bp)) {
5635 /* enable HW interrupt from PXP on USDM overflow
5636 bit 16 on INT_MASK_0 */
5637 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5638 }
a2fbb9ea 5639
94a78b79 5640 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 5641 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5642
5643#ifdef __BIG_ENDIAN
34f80b04
EG
5644 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5645 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5646 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5647 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5648 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
5649 /* make sure this value is 0 */
5650 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
5651
5652/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5653 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5654 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5655 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5656 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5657#endif
5658
34f80b04 5659 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5660#ifdef BCM_ISCSI
34f80b04
EG
5661 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5662 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5663 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5664#endif
5665
34f80b04
EG
5666 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5667 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5668
34f80b04
EG
5669 /* let the HW do its magic ... */
5670 msleep(100);
5671 /* finish PXP init */
5672 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5673 if (val != 1) {
5674 BNX2X_ERR("PXP2 CFG failed\n");
5675 return -EBUSY;
5676 }
5677 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5678 if (val != 1) {
5679 BNX2X_ERR("PXP2 RD_INIT failed\n");
5680 return -EBUSY;
5681 }
a2fbb9ea 5682
34f80b04
EG
5683 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5684 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5685
94a78b79 5686 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 5687
34f80b04
EG
5688 /* clean the DMAE memory */
5689 bp->dmae_ready = 1;
5690 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5691
94a78b79
VZ
5692 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5693 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5694 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5695 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 5696
34f80b04
EG
5697 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5698 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5699 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5700 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5701
94a78b79 5702 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
34f80b04
EG
5703 /* soft reset pulse */
5704 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5705 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5706
5707#ifdef BCM_ISCSI
94a78b79 5708 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 5709#endif
a2fbb9ea 5710
94a78b79 5711 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
5712 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5713 if (!CHIP_REV_IS_SLOW(bp)) {
5714 /* enable hw interrupt from doorbell Q */
5715 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5716 }
a2fbb9ea 5717
94a78b79
VZ
5718 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5719 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 5720 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
5721 /* set NIC mode */
5722 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
5723 if (CHIP_IS_E1H(bp))
5724 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5725
94a78b79
VZ
5726 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5727 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5728 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5729 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 5730
490c3c9b
EG
5731 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5732 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5733 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5734 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 5735
94a78b79
VZ
5736 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5737 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5738 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5739 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 5740
34f80b04
EG
5741 /* sync semi rtc */
5742 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5743 0x80000000);
5744 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5745 0x80000000);
a2fbb9ea 5746
94a78b79
VZ
5747 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5748 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5749 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 5750
34f80b04
EG
5751 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5752 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5753 REG_WR(bp, i, 0xc0cac01a);
5754 /* TODO: replace with something meaningful */
5755 }
94a78b79 5756 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
34f80b04 5757 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5758
34f80b04
EG
5759 if (sizeof(union cdu_context) != 1024)
5760 /* we currently assume that a context is 1024 bytes */
5761 printk(KERN_ALERT PFX "please adjust the size of"
5762 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5763
94a78b79 5764 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
5765 val = (4 << 24) + (0 << 12) + 1024;
5766 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5767 if (CHIP_IS_E1(bp)) {
5768 /* !!! fix pxp client credit until excel update */
5769 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5770 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5771 }
a2fbb9ea 5772
94a78b79 5773 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 5774 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5775 /* enable context validation interrupt from CFC */
5776 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5777
5778 /* set the thresholds to prevent CFC/CDU race */
5779 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5780
94a78b79
VZ
5781 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5782 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 5783
34f80b04 5784 /* PXPCS COMMON comes here */
94a78b79 5785 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
5786 /* Reset PCIE errors for debug */
5787 REG_WR(bp, 0x2814, 0xffffffff);
5788 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5789
34f80b04 5790 /* EMAC0 COMMON comes here */
94a78b79 5791 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
34f80b04 5792 /* EMAC1 COMMON comes here */
94a78b79 5793 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
34f80b04 5794 /* DBU COMMON comes here */
94a78b79 5795 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
34f80b04 5796 /* DBG COMMON comes here */
94a78b79 5797 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 5798
94a78b79 5799 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
5800 if (CHIP_IS_E1H(bp)) {
5801 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5802 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5803 }
5804
5805 if (CHIP_REV_IS_SLOW(bp))
5806 msleep(200);
5807
5808 /* finish CFC init */
5809 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5810 if (val != 1) {
5811 BNX2X_ERR("CFC LL_INIT failed\n");
5812 return -EBUSY;
5813 }
5814 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5815 if (val != 1) {
5816 BNX2X_ERR("CFC AC_INIT failed\n");
5817 return -EBUSY;
5818 }
5819 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5820 if (val != 1) {
5821 BNX2X_ERR("CFC CAM_INIT failed\n");
5822 return -EBUSY;
5823 }
5824 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5825
34f80b04
EG
5826 /* read NIG statistic
5827 to see if this is our first up since powerup */
5828 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5829 val = *bnx2x_sp(bp, wb_data[0]);
5830
5831 /* do internal memory self test */
5832 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5833 BNX2X_ERR("internal mem self test failed\n");
5834 return -EBUSY;
5835 }
5836
35b19ba5 5837 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
5838 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5839 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5840 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 5841 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
5842 bp->port.need_hw_lock = 1;
5843 break;
5844
34f80b04
EG
5845 default:
5846 break;
5847 }
f1410647 5848
fd4ef40d
EG
5849 bnx2x_setup_fan_failure_detection(bp);
5850
34f80b04
EG
5851 /* clear PXP2 attentions */
5852 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5853
34f80b04 5854 enable_blocks_attention(bp);
a2fbb9ea 5855
6bbca910
YR
5856 if (!BP_NOMCP(bp)) {
5857 bnx2x_acquire_phy_lock(bp);
5858 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5859 bnx2x_release_phy_lock(bp);
5860 } else
5861 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5862
34f80b04
EG
5863 return 0;
5864}
a2fbb9ea 5865
34f80b04
EG
5866static int bnx2x_init_port(struct bnx2x *bp)
5867{
5868 int port = BP_PORT(bp);
94a78b79 5869 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 5870 u32 low, high;
34f80b04 5871 u32 val;
a2fbb9ea 5872
34f80b04
EG
5873 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5874
5875 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5876
5877 /* Port PXP comes here */
94a78b79 5878 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
a2fbb9ea 5879 /* Port PXP2 comes here */
94a78b79 5880 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
a2fbb9ea
ET
5881#ifdef BCM_ISCSI
5882 /* Port0 1
5883 * Port1 385 */
5884 i++;
5885 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5886 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5887 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5888 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5889
5890 /* Port0 2
5891 * Port1 386 */
5892 i++;
5893 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5894 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5895 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5896 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5897
5898 /* Port0 3
5899 * Port1 387 */
5900 i++;
5901 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5902 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5903 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5904 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5905#endif
34f80b04 5906 /* Port CMs come here */
94a78b79 5907 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea
ET
5908
5909 /* Port QM comes here */
a2fbb9ea
ET
5910#ifdef BCM_ISCSI
5911 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5912 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5913
94a78b79 5914 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
a2fbb9ea
ET
5915#endif
5916 /* Port DQ comes here */
94a78b79 5917 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 5918
94a78b79 5919 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
5920 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5921 /* no pause for emulation and FPGA */
5922 low = 0;
5923 high = 513;
5924 } else {
5925 if (IS_E1HMF(bp))
5926 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5927 else if (bp->dev->mtu > 4096) {
5928 if (bp->flags & ONE_PORT_FLAG)
5929 low = 160;
5930 else {
5931 val = bp->dev->mtu;
5932 /* (24*1024 + val*4)/256 */
5933 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5934 }
5935 } else
5936 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5937 high = low + 56; /* 14*1024/256 */
5938 }
5939 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5940 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5941
5942
ad8d3948 5943 /* Port PRS comes here */
94a78b79 5944 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
a2fbb9ea 5945 /* Port TSDM comes here */
94a78b79 5946 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
a2fbb9ea 5947 /* Port CSDM comes here */
94a78b79 5948 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
a2fbb9ea 5949 /* Port USDM comes here */
94a78b79 5950 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
a2fbb9ea 5951 /* Port XSDM comes here */
94a78b79 5952 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 5953
94a78b79
VZ
5954 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5955 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5956 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5957 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 5958
a2fbb9ea 5959 /* Port UPB comes here */
94a78b79 5960 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
34f80b04 5961 /* Port XPB comes here */
94a78b79 5962 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 5963
94a78b79 5964 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
5965
5966 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5967 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5968
5969 /* update threshold */
34f80b04 5970 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5971 /* update init credit */
34f80b04 5972 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
5973
5974 /* probe changes */
34f80b04 5975 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5976 msleep(5);
34f80b04 5977 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5978
5979#ifdef BCM_ISCSI
5980 /* tell the searcher where the T2 table is */
5981 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5982
5983 wb_write[0] = U64_LO(bp->t2_mapping);
5984 wb_write[1] = U64_HI(bp->t2_mapping);
5985 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5986 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5987 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5988 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5989
5990 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5991 /* Port SRCH comes here */
5992#endif
5993 /* Port CDU comes here */
94a78b79 5994 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
a2fbb9ea 5995 /* Port CFC comes here */
94a78b79 5996 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
5997
5998 if (CHIP_IS_E1(bp)) {
5999 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6000 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6001 }
94a78b79 6002 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6003
94a78b79 6004 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6005 /* init aeu_mask_attn_func_0/1:
6006 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6007 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6008 * bits 4-7 are used for "per vn group attention" */
6009 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6010 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6011
a2fbb9ea 6012 /* Port PXPCS comes here */
94a78b79 6013 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
a2fbb9ea 6014 /* Port EMAC0 comes here */
94a78b79 6015 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
a2fbb9ea 6016 /* Port EMAC1 comes here */
94a78b79 6017 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
a2fbb9ea 6018 /* Port DBU comes here */
94a78b79 6019 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
a2fbb9ea 6020 /* Port DBG comes here */
94a78b79 6021 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6022
94a78b79 6023 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6024
6025 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6026
6027 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6028 /* 0x2 disable e1hov, 0x1 enable */
6029 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6030 (IS_E1HMF(bp) ? 0x1 : 0x2));
6031
1c06328c
EG
6032 /* support pause requests from USDM, TSDM and BRB */
6033 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6034
6035 {
6036 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6037 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6038 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6039 }
34f80b04
EG
6040 }
6041
a2fbb9ea 6042 /* Port MCP comes here */
94a78b79 6043 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
a2fbb9ea 6044 /* Port DMAE comes here */
94a78b79 6045 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6046
35b19ba5 6047 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6048 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6049 {
6050 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6051
6052 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6053 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6054
6055 /* The GPIO should be swapped if the swap register is
6056 set and active */
6057 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6058 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6059
6060 /* Select function upon port-swap configuration */
6061 if (port == 0) {
6062 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6063 aeu_gpio_mask = (swap_val && swap_override) ?
6064 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6065 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6066 } else {
6067 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6068 aeu_gpio_mask = (swap_val && swap_override) ?
6069 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6070 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6071 }
6072 val = REG_RD(bp, offset);
6073 /* add GPIO3 to group */
6074 val |= aeu_gpio_mask;
6075 REG_WR(bp, offset, val);
6076 }
6077 break;
6078
35b19ba5 6079 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6080 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6081 /* add SPIO 5 to group 0 */
4d295db0
EG
6082 {
6083 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6084 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6085 val = REG_RD(bp, reg_addr);
f1410647 6086 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6087 REG_WR(bp, reg_addr, val);
6088 }
f1410647
ET
6089 break;
6090
6091 default:
6092 break;
6093 }
6094
c18487ee 6095 bnx2x__link_reset(bp);
a2fbb9ea 6096
34f80b04
EG
6097 return 0;
6098}
6099
6100#define ILT_PER_FUNC (768/2)
6101#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6102/* the phys address is shifted right 12 bits and has a
6103 valid bit (1) added as the 53rd bit;
6104 since this is a wide register(TM)
6105 we split it into two 32-bit writes
6106 */
6107#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6108#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6109#define PXP_ONE_ILT(x) (((x) << 10) | x)
6110#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
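
The split encoded by ONCHIP_ADDR1()/ONCHIP_ADDR2() can be checked numerically: the low word carries bits 12-43 of the physical address, the high word carries bits 44-63 plus the valid bit at position 20. A quick standalone check (the sample address is made up):

#include <stdio.h>
#include <stdint.h>

#define ONCHIP_ADDR1(x) ((uint32_t)(((uint64_t)(x) >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)(x) >> 44)))

int main(void)
{
    uint64_t phys = 0x0000123456789000ULL;    /* page-aligned DMA address */

    printf("low  word: 0x%08x\n", ONCHIP_ADDR1(phys));
    printf("high word: 0x%08x (bit 20 = valid)\n", ONCHIP_ADDR2(phys));
    return 0;
}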
6111
6112#define CNIC_ILT_LINES 0
6113
6114static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6115{
6116 int reg;
6117
6118 if (CHIP_IS_E1H(bp))
6119 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6120 else /* E1 */
6121 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6122
6123 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6124}
6125
6126static int bnx2x_init_func(struct bnx2x *bp)
6127{
6128 int port = BP_PORT(bp);
6129 int func = BP_FUNC(bp);
8badd27a 6130 u32 addr, val;
34f80b04
EG
6131 int i;
6132
6133 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6134
8badd27a
EG
6135 /* set MSI reconfigure capability */
6136 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6137 val = REG_RD(bp, addr);
6138 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6139 REG_WR(bp, addr, val);
6140
34f80b04
EG
6141 i = FUNC_ILT_BASE(func);
6142
6143 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6144 if (CHIP_IS_E1H(bp)) {
6145 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6146 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6147 } else /* E1 */
6148 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6149 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6150
6151
6152 if (CHIP_IS_E1H(bp)) {
6153 for (i = 0; i < 9; i++)
6154 bnx2x_init_block(bp,
94a78b79 6155 cm_blocks[i], FUNC0_STAGE + func);
34f80b04
EG
6156
6157 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6158 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6159 }
6160
6161 /* HC init per function */
6162 if (CHIP_IS_E1H(bp)) {
6163 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6164
6165 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6166 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6167 }
94a78b79 6168 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 6169
c14423fe 6170 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6171 REG_WR(bp, 0x2114, 0xffffffff);
6172 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6173
34f80b04
EG
6174 return 0;
6175}
6176
6177static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6178{
6179 int i, rc = 0;
a2fbb9ea 6180
34f80b04
EG
6181 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6182 BP_FUNC(bp), load_code);
a2fbb9ea 6183
34f80b04
EG
6184 bp->dmae_ready = 0;
6185 mutex_init(&bp->dmae_mutex);
6186 bnx2x_gunzip_init(bp);
a2fbb9ea 6187
34f80b04
EG
6188 switch (load_code) {
6189 case FW_MSG_CODE_DRV_LOAD_COMMON:
6190 rc = bnx2x_init_common(bp);
6191 if (rc)
6192 goto init_hw_err;
6193 /* no break */
6194
6195 case FW_MSG_CODE_DRV_LOAD_PORT:
6196 bp->dmae_ready = 1;
6197 rc = bnx2x_init_port(bp);
6198 if (rc)
6199 goto init_hw_err;
6200 /* no break */
6201
6202 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6203 bp->dmae_ready = 1;
6204 rc = bnx2x_init_func(bp);
6205 if (rc)
6206 goto init_hw_err;
6207 break;
6208
6209 default:
6210 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6211 break;
6212 }
6213
6214 if (!BP_NOMCP(bp)) {
6215 int func = BP_FUNC(bp);
a2fbb9ea
ET
6216
6217 bp->fw_drv_pulse_wr_seq =
34f80b04 6218 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6219 DRV_PULSE_SEQ_MASK);
34f80b04
EG
6220 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6221 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6222 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6223 } else
6224 bp->func_stx = 0;
a2fbb9ea 6225
34f80b04
EG
6226 /* this needs to be done before gunzip end */
6227 bnx2x_zero_def_sb(bp);
6228 for_each_queue(bp, i)
6229 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6230
6231init_hw_err:
6232 bnx2x_gunzip_end(bp);
6233
6234 return rc;
a2fbb9ea
ET
6235}
6236
c14423fe 6237/* send the MCP a request, block until there is a reply */
4d295db0 6238u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
a2fbb9ea 6239{
34f80b04 6240 int func = BP_FUNC(bp);
f1410647
ET
6241 u32 seq = ++bp->fw_seq;
6242 u32 rc = 0;
19680c48
EG
6243 u32 cnt = 1;
6244 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6245
34f80b04 6246 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6247 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6248
19680c48
EG
6249 do {
6250 /* let the FW do it's magic ... */
6251 msleep(delay);
a2fbb9ea 6252
19680c48 6253 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6254
19680c48
EG
6255 /* Give the FW up to 2 second (200*10ms) */
6256 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6257
6258 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6259 cnt*delay, rc, seq);
a2fbb9ea
ET
6260
6261 /* is this a reply to our command? */
6262 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6263 rc &= FW_MSG_CODE_MASK;
f1410647 6264
a2fbb9ea
ET
6265 } else {
6266 /* FW BUG! */
6267 BNX2X_ERR("FW failed to respond!\n");
6268 bnx2x_fw_dump(bp);
6269 rc = 0;
6270 }
f1410647 6271
a2fbb9ea
ET
6272 return rc;
6273}
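
The handshake above hinges on the sequence number: the driver ORs an incrementing seq into the command word and accepts a reply only when the firmware echoes the same seq back, so stale mailbox contents are never mistaken for an answer. A toy model of that matching (mask layout assumed here, firmware reply simulated):

#include <stdint.h>
#include <stdio.h>

#define SEQ_MASK  0x0000ffff    /* assumed split; the real masks differ */
#define CODE_MASK 0xffff0000

static uint32_t mbox;           /* stands in for the shared-memory mailbox */

static void fw_fake_reply(uint32_t code, uint32_t seq)
{
    mbox = (code & CODE_MASK) | (seq & SEQ_MASK);
}

static uint32_t fw_command(uint32_t command, uint32_t *seq_counter)
{
    uint32_t seq = ++(*seq_counter);
    uint32_t rc;

    /* write command|seq, then poll the reply mailbox */
    fw_fake_reply(command, seq);    /* simulate the firmware answering */
    rc = mbox;

    if ((rc & SEQ_MASK) == (seq & SEQ_MASK))
        return rc & CODE_MASK;      /* reply matches our sequence number */
    return 0;                       /* stale or missing reply */
}

int main(void)
{
    uint32_t seq = 0;

    printf("reply code 0x%08x\n", fw_command(0xABCD0000, &seq));
    return 0;
}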
6274
6275static void bnx2x_free_mem(struct bnx2x *bp)
6276{
6277
6278#define BNX2X_PCI_FREE(x, y, size) \
6279 do { \
6280 if (x) { \
6281 pci_free_consistent(bp->pdev, size, x, y); \
6282 x = NULL; \
6283 y = 0; \
6284 } \
6285 } while (0)
6286
6287#define BNX2X_FREE(x) \
6288 do { \
6289 if (x) { \
6290 vfree(x); \
6291 x = NULL; \
6292 } \
6293 } while (0)
6294
6295 int i;
6296
6297 /* fastpath */
555f6c78 6298 /* Common */
a2fbb9ea
ET
6299 for_each_queue(bp, i) {
6300
555f6c78 6301 /* status blocks */
a2fbb9ea
ET
6302 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6303 bnx2x_fp(bp, i, status_blk_mapping),
6304 sizeof(struct host_status_block) +
6305 sizeof(struct eth_tx_db_data));
555f6c78
EG
6306 }
6307 /* Rx */
6308 for_each_rx_queue(bp, i) {
a2fbb9ea 6309
555f6c78 6310 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6311 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6312 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6313 bnx2x_fp(bp, i, rx_desc_mapping),
6314 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6315
6316 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6317 bnx2x_fp(bp, i, rx_comp_mapping),
6318 sizeof(struct eth_fast_path_rx_cqe) *
6319 NUM_RCQ_BD);
a2fbb9ea 6320
7a9b2557 6321 /* SGE ring */
32626230 6322 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6323 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6324 bnx2x_fp(bp, i, rx_sge_mapping),
6325 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6326 }
555f6c78
EG
6327 /* Tx */
6328 for_each_tx_queue(bp, i) {
6329
6330 /* fastpath tx rings: tx_buf tx_desc */
6331 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6332 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6333 bnx2x_fp(bp, i, tx_desc_mapping),
6334 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6335 }
a2fbb9ea
ET
6336 /* end of fastpath */
6337
6338 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6339 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6340
6341 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6342 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
6343
6344#ifdef BCM_ISCSI
6345 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6346 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6347 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6348 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6349#endif
7a9b2557 6350 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6351
6352#undef BNX2X_PCI_FREE
6353#undef BNX2X_FREE
6354}
6355
6356static int bnx2x_alloc_mem(struct bnx2x *bp)
6357{
6358
6359#define BNX2X_PCI_ALLOC(x, y, size) \
6360 do { \
6361 x = pci_alloc_consistent(bp->pdev, size, y); \
6362 if (x == NULL) \
6363 goto alloc_mem_err; \
6364 memset(x, 0, size); \
6365 } while (0)
6366
6367#define BNX2X_ALLOC(x, size) \
6368 do { \
6369 x = vmalloc(size); \
6370 if (x == NULL) \
6371 goto alloc_mem_err; \
6372 memset(x, 0, size); \
6373 } while (0)
6374
6375 int i;
6376
6377 /* fastpath */
555f6c78 6378 /* Common */
a2fbb9ea
ET
6379 for_each_queue(bp, i) {
6380 bnx2x_fp(bp, i, bp) = bp;
6381
555f6c78 6382 /* status blocks */
a2fbb9ea
ET
6383 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6384 &bnx2x_fp(bp, i, status_blk_mapping),
6385 sizeof(struct host_status_block) +
6386 sizeof(struct eth_tx_db_data));
555f6c78
EG
6387 }
6388 /* Rx */
6389 for_each_rx_queue(bp, i) {
a2fbb9ea 6390
555f6c78 6391 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6392 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6393 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6394 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6395 &bnx2x_fp(bp, i, rx_desc_mapping),
6396 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6397
6398 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6399 &bnx2x_fp(bp, i, rx_comp_mapping),
6400 sizeof(struct eth_fast_path_rx_cqe) *
6401 NUM_RCQ_BD);
6402
7a9b2557
VZ
6403 /* SGE ring */
6404 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6405 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6406 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6407 &bnx2x_fp(bp, i, rx_sge_mapping),
6408 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6409 }
555f6c78
EG
6410 /* Tx */
6411 for_each_tx_queue(bp, i) {
6412
6413 bnx2x_fp(bp, i, hw_tx_prods) =
6414 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6415
6416 bnx2x_fp(bp, i, tx_prods_mapping) =
6417 bnx2x_fp(bp, i, status_blk_mapping) +
6418 sizeof(struct host_status_block);
6419
6420 /* fastpath tx rings: tx_buf tx_desc */
6421 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6422 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6423 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6424 &bnx2x_fp(bp, i, tx_desc_mapping),
6425 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6426 }
a2fbb9ea
ET
6427 /* end of fastpath */
6428
6429 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6430 sizeof(struct host_def_status_block));
6431
6432 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6433 sizeof(struct bnx2x_slowpath));
6434
6435#ifdef BCM_ISCSI
6436 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6437
6438 /* Initialize T1 */
6439 for (i = 0; i < 64*1024; i += 64) {
6440 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6441 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6442 }
6443
6444 /* allocate searcher T2 table
6445 we allocate 1/4 of alloc num for T2
6446 (which is not entered into the ILT) */
6447 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6448
6449 /* Initialize T2 */
6450 for (i = 0; i < 16*1024; i += 64)
6451 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6452
c14423fe 6453 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
6454 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6455
6456 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6457 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6458
6459 /* QM queues (128*MAX_CONN) */
6460 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6461#endif
6462
6463 /* Slow path ring */
6464 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6465
6466 return 0;
6467
6468alloc_mem_err:
6469 bnx2x_free_mem(bp);
6470 return -ENOMEM;
6471
6472#undef BNX2X_PCI_ALLOC
6473#undef BNX2X_ALLOC
6474}
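
BNX2X_PCI_ALLOC/BNX2X_ALLOC give bnx2x_alloc_mem() all-or-nothing semantics: the first failure jumps to alloc_mem_err, and bnx2x_free_mem() tolerates never-allocated (NULL) members. The shape of that goto-unwind idiom in plain C:

#include <stdlib.h>
#include <string.h>

struct rings { void *rx, *tx, *sp; };

#define ALLOC(p, sz)            \
    do {                        \
        (p) = malloc(sz);       \
        if ((p) == NULL)        \
            goto err;           \
        memset((p), 0, (sz));   \
    } while (0)

static int rings_alloc(struct rings *r)
{
    memset(r, 0, sizeof(*r));
    ALLOC(r->rx, 4096);
    ALLOC(r->tx, 4096);
    ALLOC(r->sp, 4096);
    return 0;
err:
    free(r->rx);    /* free() tolerates NULL, like the driver's helpers */
    free(r->tx);
    free(r->sp);
    return -1;
}

int main(void)
{
    struct rings r;

    return rings_alloc(&r);
}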
6475
6476static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6477{
6478 int i;
6479
555f6c78 6480 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6481 struct bnx2x_fastpath *fp = &bp->fp[i];
6482
6483 u16 bd_cons = fp->tx_bd_cons;
6484 u16 sw_prod = fp->tx_pkt_prod;
6485 u16 sw_cons = fp->tx_pkt_cons;
6486
a2fbb9ea
ET
6487 while (sw_cons != sw_prod) {
6488 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6489 sw_cons++;
6490 }
6491 }
6492}
6493
6494static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6495{
6496 int i, j;
6497
555f6c78 6498 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6499 struct bnx2x_fastpath *fp = &bp->fp[j];
6500
a2fbb9ea
ET
6501 for (i = 0; i < NUM_RX_BD; i++) {
6502 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6503 struct sk_buff *skb = rx_buf->skb;
6504
6505 if (skb == NULL)
6506 continue;
6507
6508 pci_unmap_single(bp->pdev,
6509 pci_unmap_addr(rx_buf, mapping),
356e2385 6510 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
6511
6512 rx_buf->skb = NULL;
6513 dev_kfree_skb(skb);
6514 }
7a9b2557 6515 if (!fp->disable_tpa)
32626230
EG
6516 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6517 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6518 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6519 }
6520}
6521
6522static void bnx2x_free_skbs(struct bnx2x *bp)
6523{
6524 bnx2x_free_tx_skbs(bp);
6525 bnx2x_free_rx_skbs(bp);
6526}
6527
6528static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6529{
34f80b04 6530 int i, offset = 1;
a2fbb9ea
ET
6531
6532 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6533 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6534 bp->msix_table[0].vector);
6535
6536 for_each_queue(bp, i) {
c14423fe 6537 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6538 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6539 bnx2x_fp(bp, i, state));
6540
34f80b04 6541 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6542 }
a2fbb9ea
ET
6543}
6544
6545static void bnx2x_free_irq(struct bnx2x *bp)
6546{
a2fbb9ea 6547 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6548 bnx2x_free_msix_irqs(bp);
6549 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6550 bp->flags &= ~USING_MSIX_FLAG;
6551
8badd27a
EG
6552 } else if (bp->flags & USING_MSI_FLAG) {
6553 free_irq(bp->pdev->irq, bp->dev);
6554 pci_disable_msi(bp->pdev);
6555 bp->flags &= ~USING_MSI_FLAG;
6556
a2fbb9ea
ET
6557 } else
6558 free_irq(bp->pdev->irq, bp->dev);
6559}
6560
6561static int bnx2x_enable_msix(struct bnx2x *bp)
6562{
8badd27a
EG
6563 int i, rc, offset = 1;
6564 int igu_vec = 0;
a2fbb9ea 6565
8badd27a
EG
6566 bp->msix_table[0].entry = igu_vec;
6567 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6568
34f80b04 6569 for_each_queue(bp, i) {
8badd27a 6570 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
6571 bp->msix_table[i + offset].entry = igu_vec;
6572 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6573 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6574 }
6575
34f80b04 6576 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6577 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6578 if (rc) {
8badd27a
EG
6579 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6580 return rc;
34f80b04 6581 }
8badd27a 6582
a2fbb9ea
ET
6583 bp->flags |= USING_MSIX_FLAG;
6584
6585 return 0;
a2fbb9ea
ET
6586}
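
The vector layout is slowpath first, then one entry per fastpath queue starting at offset 1, and pci_enable_msix() is asked for all BNX2X_NUM_QUEUES(bp) + 1 vectors in one call. A sketch of filling such a table (struct and field names simplified for this sketch):

#include <stdio.h>

struct msix_slot { unsigned int entry, vector; };

static void fill_msix_table(struct msix_slot *tbl, int nqueues, int base_id)
{
    int i, offset = 1;

    tbl[0].entry = 0;               /* entry 0: slowpath interrupt */
    for (i = 0; i < nqueues; i++)
        tbl[i + offset].entry = base_id + offset + i;
}

int main(void)
{
    struct msix_slot tbl[5];
    int i;

    fill_msix_table(tbl, 4, 0);
    for (i = 0; i < 5; i++)
        printf("msix_table[%d].entry = %u\n", i, tbl[i].entry);
    return 0;
}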
6587
a2fbb9ea
ET
6588static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6589{
34f80b04 6590 int i, rc, offset = 1;
a2fbb9ea 6591
a2fbb9ea
ET
6592 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6593 bp->dev->name, bp->dev);
a2fbb9ea
ET
6594 if (rc) {
6595 BNX2X_ERR("request sp irq failed\n");
6596 return -EBUSY;
6597 }
6598
6599 for_each_queue(bp, i) {
555f6c78
EG
6600 struct bnx2x_fastpath *fp = &bp->fp[i];
6601
6602 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6603 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6604 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6605 if (rc) {
555f6c78 6606 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
6607 bnx2x_free_msix_irqs(bp);
6608 return -EBUSY;
6609 }
6610
555f6c78 6611 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6612 }
6613
555f6c78
EG
6614 i = BNX2X_NUM_QUEUES(bp);
6615 if (is_multi(bp))
6616 printk(KERN_INFO PFX
6617 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6618 bp->dev->name, bp->msix_table[0].vector,
6619 bp->msix_table[offset].vector,
6620 bp->msix_table[offset + i - 1].vector);
6621 else
6622 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6623 bp->dev->name, bp->msix_table[0].vector,
6624 bp->msix_table[offset + i - 1].vector);
6625
a2fbb9ea 6626 return 0;
a2fbb9ea
ET
6627}
6628
8badd27a
EG
6629static int bnx2x_enable_msi(struct bnx2x *bp)
6630{
6631 int rc;
6632
6633 rc = pci_enable_msi(bp->pdev);
6634 if (rc) {
6635 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6636 return -1;
6637 }
6638 bp->flags |= USING_MSI_FLAG;
6639
6640 return 0;
6641}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}
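
/* bnx2x_netif_start()/bnx2x_netif_stop() bracket all traffic processing:
 * bp->intr_sem acts as an interrupt gate - it is raised while the HW is
 * reconfigured, and only the caller that drops it back to zero re-enables
 * NAPI, HW interrupts and (when the state allows it) the TX queues.
 */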
static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
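
/* Ramrods complete asynchronously: bnx2x_wait_ramrod() spins for up to
 * ~5 seconds until bnx2x_sp_event() moves *state_p to the expected state,
 * optionally servicing the RX rings itself (poll != 0) when interrupts
 * cannot be relied upon.
 */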
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

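/* Pick the interrupt mode and the matching number of queues: INT#x and
 * MSI are limited to a single queue, while MSI-X (the default) scales
 * the queue count with the number of online CPUs and falls back to one
 * queue if the vectors cannot be allocated.
 */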
static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   " number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}

static void bnx2x_set_rx_mode(struct net_device *dev);

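/* Bring the NIC up: negotiate the load type with the MCP (COMMON, PORT or
 * FUNCTION decides how much of the chip this driver instance initializes),
 * init the HW and FW state, open the leading and non-default connections,
 * program the MAC address and start the fast path.
 */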
/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
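
/* Closing a non-default connection is a two-ramrod sequence: HALT stops
 * the client, then CFC_DEL releases its connection context; each step is
 * waited for synchronously.
 */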
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
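
/* The reset helpers below mirror the three unload scopes the MCP can
 * request: function (IGU and ILT of this function), port (NIG/BRB masks
 * of this port) and common (the whole device).
 */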
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
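
/* Bring the NIC down: quiesce TX, pick the WOL-dependent unload request
 * for the MCP, close all connections, reset the chip at the scope the
 * MCP replies with and free the driver resources.
 */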
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

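/* The PXP "pretend" registers let one PCI function issue GRC accesses as
 * if it were another function; this is used below to disable the interrupt
 * controller of a function left armed by a pre-boot UNDI driver.
 */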
static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}
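
/* A pre-boot UNDI driver is recognized by its signature CID offset (0x7
 * in DORQ_REG_NORM_CID_OFST); if found, it is unloaded on both ports and
 * the device is reset while preserving the NIG port-swap straps.
 */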
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
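
/* Read the board-wide configuration: the chip id, flash size and the
 * shared memory (shmem) base through which the MCP exports the NVRAM
 * settings; an shmem base outside 0xA0000-0xBFFFF means no active MCP.
 */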
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
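
/* Translate the external PHY type from the NVRAM port configuration into
 * an ethtool SUPPORTED_* mask, then trim the mask according to the NVRAM
 * speed_cap_mask.
 */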
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
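
/* Map the NVRAM-requested link speed/duplex into link_params and the
 * advertised mode mask; an invalid combination is reported and skipped,
 * and an unknown speed falls back to autoneg.
 */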
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
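
/* Per-port NVRAM data: lane and external PHY configuration, speed
 * capability mask, XGXS equalization values, the WoL default and the
 * port MAC address (stored in two shmem words).
 */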
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
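
/* On E1H, a valid outer-VLAN tag (E1HOV) in the function's MF config
 * marks multi-function mode; in that case the MAC address is also taken
 * from the per-function MF config rather than from the port config.
 */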
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
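
/* One-time driver state init: work items, module parameter validation
 * (multi_mode requires MSI-X), TPA/LRO flags, ring sizes, interrupt
 * coalescing defaults and the periodic timer.
 */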
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	static u32 regdump_len;
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (regdump_len)
		return regdump_len;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
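
/*
 * Illustration (not driver code): a toy, self-contained walk through the
 * sizing rule above - each plain register range contributes its word count,
 * each paged ("wreg") range contributes size * (1 + read_regs_count) words,
 * and the word total is converted to bytes with a header added.  The table
 * contents and the 16-byte header are made up for the example.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

struct reg_range  { uint32_t addr; uint32_t size; };
struct wreg_range { uint32_t addr; uint32_t size; uint32_t read_regs_count; };

int main(void)
{
	static const struct reg_range regs[] = { { 0x1000, 4 }, { 0x2000, 2 } };
	static const struct wreg_range wregs[] = { { 0x3000, 3, 2 } };
	uint32_t words = 0;
	unsigned int i;

	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
		words += regs[i].size;
	for (i = 0; i < sizeof(wregs) / sizeof(wregs[0]); i++)
		words += wregs[i].size * (1 + wregs[i].read_regs_count);

	/* 4 + 2 + 3*(1+2) = 15 words -> 60 bytes, plus a made-up header */
	printf("dump length: %u bytes\n", words * 4 + 16);
	return 0;
}
#endif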

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32
bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
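
/*
 * Illustration (not driver code): both helpers above follow the same
 * "request, then poll a status bit with a bounded busy-wait" pattern.  A
 * minimal runnable sketch of that pattern, using a fake one-register
 * "device" (an assumption standing in for REG_RD/REG_WR):
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

static uint32_t fake_arb_reg;				/* simulated HW */

static uint32_t reg_read(void)     { return fake_arb_reg; }
static void reg_write(uint32_t v)  { fake_arb_reg |= v; /* grant instantly */ }

static int poll_for_bit(uint32_t bit, int tries)
{
	while (tries--) {
		if (reg_read() & bit)
			return 0;		/* granted */
		/* a real driver would udelay(5) between polls */
	}
	return -EBUSY;				/* timed out */
}

int main(void)
{
	uint32_t req_bit = 1u << 0;		/* arbitration bit, port 0 */

	reg_write(req_bit);			/* request access */
	printf("lock: %s\n", poll_for_bit(req_bit, 10000) ? "busy" : "ok");
	return 0;
}
#endif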

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order,
			 * but ethtool sees it as an array of bytes;
			 * converting to big-endian does the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
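
/*
 * Illustration (not driver code): the cpu_to_be32() above matters because
 * ethtool hands the buffer to userspace as raw bytes.  A small runnable
 * sketch of why a big-endian store gives a stable byte order on any host
 * (the 0x11223344 value is invented):
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t to_be32(uint32_t v)
{
	uint8_t b[4] = { v >> 24, v >> 16, v >> 8, v };
	uint32_t out;

	memcpy(&out, b, 4);
	return out;		/* MSB first in memory on any host */
}

int main(void)
{
	uint32_t val = to_be32(0x11223344);
	uint8_t *p = (uint8_t *)&val;

	/* prints "11 22 33 44" on little- and big-endian machines alike */
	printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
	return 0;
}
#endif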

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes;
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
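
/*
 * Illustration (not driver code): a runnable sketch (values invented) of
 * the read-modify-write above.  To change one byte, the containing aligned
 * 32-bit word is read, the target byte is masked out and replaced at bit
 * position 8 * (offset & 3), and the whole word is written back.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define BYTE_SHIFT(offset)	(8 * ((offset) & 0x03))

int main(void)
{
	uint32_t word = 0xaabbccdd;	/* pretend this was read from flash */
	uint32_t offset = 0x102;	/* byte address; word address 0x100 */
	uint8_t new_byte = 0x5e;

	word &= ~(0xffu << BYTE_SHIFT(offset));
	word |= ((uint32_t)new_byte << BYTE_SHIFT(offset));

	printf("0x%08x\n", word);	/* prints 0xaa5eccdd */
	return 0;
}
#endif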

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
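
/*
 * Illustration (not driver code): a toy, runnable walk through the flag
 * logic above, assuming a 256-byte page purely for the example (the real
 * NVRAM_PAGE_SIZE comes from the driver headers).  LAST marks the final
 * dword of the buffer and the dword that ends a page; FIRST marks the
 * initial dword and every dword that starts a new page.
 */
#if 0
#include <stdio.h>

#define PAGE_SIZE_BYTES	256

int main(void)
{
	unsigned int offset = PAGE_SIZE_BYTES - 8;	/* straddles a page */
	unsigned int buf_size = 16, written = 0;
	unsigned int flags = 1;				/* 1 = FIRST */

	while (written < buf_size) {
		if (written == buf_size - 4)
			flags |= 2;			/* 2 = LAST */
		else if (((offset + 4) % PAGE_SIZE_BYTES) == 0)
			flags |= 2;
		else if ((offset % PAGE_SIZE_BYTES) == 0)
			flags |= 1;

		/* prints FIRST, LAST, FIRST, LAST for this buffer */
		printf("offset 0x%03x%s%s\n", offset,
		       (flags & 1) ? " FIRST" : "", (flags & 2) ? " LAST" : "");
		offset += 4;
		written += 4;
		flags = 0;
	}
	return 0;
}
#endif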

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars, 1);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA when Rx CSUM is disabled; otherwise all
	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   first by writing 0x00000000, then by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify the value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
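
/*
 * Illustration (not driver code): a compact, runnable sketch of the test
 * idea above - write a pattern, read it back, restore the original, and
 * compare only the bits the mask says are implemented.  The "register" here
 * is simulated, and the mask value is borrowed from the first table entry
 * purely as an example.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t sim_reg;				/* simulated HW */
static const uint32_t sim_mask = 0x000003ff;		/* implemented bits */

static uint32_t reg_read(void)		{ return sim_reg & sim_mask; }
static void reg_write(uint32_t val)	{ sim_reg = val & sim_mask; }

static int check_pattern(uint32_t mask, uint32_t pattern)
{
	uint32_t saved = reg_read();
	uint32_t val;

	reg_write(pattern);
	val = reg_read();
	reg_write(saved);				/* always restore */

	return (val & mask) != (pattern & mask);	/* nonzero on failure */
}

int main(void)
{
	/* the driver repeats with all-zeros and all-ones, as above */
	int fail = check_pattern(sim_mask, 0x00000000) |
		   check_pattern(sim_mask, 0xffffffff);

	printf("register test: %s\n", fail ? "FAILED" : "ok");
	return 0;
}
#endif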

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
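
/*
 * Illustration (not driver code): the 0xdebb20e3 constant above is the
 * standard CRC-32 "residual" - running the CRC register (reflected
 * polynomial, all-ones seed, no final inversion, matching ether_crc_le())
 * over a block that already carries its own inverted CRC in the trailing
 * four bytes always lands on this value.  A runnable demonstration with
 * made-up data:
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t crc32_le(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xffffffff;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;		/* no final inversion, like ether_crc_le() */
}

int main(void)
{
	uint8_t block[16] = "example data";	/* 12 payload bytes */
	uint32_t crc = ~crc32_le(block, 12);	/* stored form is inverted */

	/* append the CRC least-significant byte first */
	block[12] = crc;
	block[13] = crc >> 8;
	block[14] = crc >> 16;
	block[15] = crc >> 24;

	/* prints "residual 0xdebb20e3" */
	printf("residual 0x%08x\n", crc32_le(block, 16));
	return 0;
}
#endif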

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
					8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
					8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
					8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
					8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
					8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
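
/*
 * Illustration (not driver code): the 8-byte counters above live as two
 * adjacent 32-bit words (hi first) that HILO_U64() combines into one u64.
 * A trivial runnable sketch of that combination with made-up values:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint64_t hilo_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint32_t counter[2] = { 0x00000001, 0x00000002 };	/* hi, lo */

	/* prints 0x0000000100000002 */
	printf("0x%016llx\n",
	       (unsigned long long)hilo_u64(counter[0], counter[1]));
	return 0;
}
#endif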
10124
10125static int bnx2x_phys_id(struct net_device *dev, u32 data)
10126{
10127 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10128 int port = BP_PORT(bp);
a2fbb9ea
ET
10129 int i;
10130
34f80b04
EG
10131 if (!netif_running(dev))
10132 return 0;
10133
10134 if (!bp->port.pmf)
10135 return 0;
10136
a2fbb9ea
ET
10137 if (data == 0)
10138 data = 2;
10139
10140 for (i = 0; i < (data * 2); i++) {
10141 if ((i % 2) == 0)
10142 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10143 bp->link_params.hw_led_mode,
10144 bp->link_params.chip_id);
10145 else
10146 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10147 bp->link_params.hw_led_mode,
10148 bp->link_params.chip_id);
10149
10150 msleep_interruptible(500);
10151 if (signal_pending(current))
10152 break;
10153 }
10154
10155 if (bp->link_vars.link_up)
10156 bnx2x_set_led(bp, port, LED_MODE_OPER,
10157 bp->link_vars.line_speed,
10158 bp->link_params.hw_led_mode,
10159 bp->link_params.chip_id);
10160
10161 return 0;
10162}
10163
10164static struct ethtool_ops bnx2x_ethtool_ops = {
10165 .get_settings = bnx2x_get_settings,
10166 .set_settings = bnx2x_set_settings,
10167 .get_drvinfo = bnx2x_get_drvinfo,
10168 .get_regs_len = bnx2x_get_regs_len,
10169 .get_regs = bnx2x_get_regs,
10170 .get_wol = bnx2x_get_wol,
10171 .set_wol = bnx2x_set_wol,
10172 .get_msglevel = bnx2x_get_msglevel,
10173 .set_msglevel = bnx2x_set_msglevel,
10174 .nway_reset = bnx2x_nway_reset,
10175 .get_link = bnx2x_get_link,
10176 .get_eeprom_len = bnx2x_get_eeprom_len,
10177 .get_eeprom = bnx2x_get_eeprom,
10178 .set_eeprom = bnx2x_set_eeprom,
10179 .get_coalesce = bnx2x_get_coalesce,
10180 .set_coalesce = bnx2x_set_coalesce,
10181 .get_ringparam = bnx2x_get_ringparam,
10182 .set_ringparam = bnx2x_set_ringparam,
10183 .get_pauseparam = bnx2x_get_pauseparam,
10184 .set_pauseparam = bnx2x_set_pauseparam,
10185 .get_rx_csum = bnx2x_get_rx_csum,
10186 .set_rx_csum = bnx2x_set_rx_csum,
10187 .get_tx_csum = ethtool_op_get_tx_csum,
10188 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10189 .set_flags = bnx2x_set_flags,
10190 .get_flags = ethtool_op_get_flags,
10191 .get_sg = ethtool_op_get_sg,
10192 .set_sg = ethtool_op_set_sg,
10193 .get_tso = ethtool_op_get_tso,
10194 .set_tso = bnx2x_set_tso,
10195 .self_test_count = bnx2x_self_test_count,
10196 .self_test = bnx2x_self_test,
10197 .get_strings = bnx2x_get_strings,
10198 .phys_id = bnx2x_phys_id,
10199 .get_stats_count = bnx2x_get_stats_count,
10200 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10201};
10202
10203/* end of ethtool_ops */
10204
10205/****************************************************************************
10206* General service functions
10207****************************************************************************/
10208
10209static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10210{
10211 u16 pmcsr;
10212
10213 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10214
10215 switch (state) {
10216 case PCI_D0:
10217 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10218 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10219 PCI_PM_CTRL_PME_STATUS));
10220
10221 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10222 /* delay required during transition out of D3hot */
10223 msleep(20);
10224 break;
10225
10226 case PCI_D3hot:
10227 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10228 pmcsr |= 3;
10229
10230 if (bp->wol)
10231 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10232
10233 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10234 pmcsr);
10235
10236 /* No more memory access after this point until
10237 * device is brought back to D0.
10238 */
10239 break;
10240
10241 default:
10242 return -EINVAL;
10243 }
10244 return 0;
10245}
10246
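For reference, the power state that bnx2x_set_power_state() programs lives in the low two bits of the PMCSR word (PCI_PM_CTRL_STATE_MASK, per the PCI power-management spec): 0 selects D0 and 3 selects D3hot, which is why the D3hot branch ORs in 3 after clearing the field. A small illustrative decode (the helper name is hypothetical):

	/* illustrative: extract the current D-state from a PMCSR value */
	static inline u16 example_pmcsr_dstate(u16 pmcsr)
	{
		return pmcsr & PCI_PM_CTRL_STATE_MASK;	/* 0 = D0, 3 = D3hot */
	}
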
10247static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10248{
10249 u16 rx_cons_sb;
10250
10251 /* Tell compiler that status block fields can change */
10252 barrier();
10253 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10254 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10255 rx_cons_sb++;
10256 return (fp->rx_comp_cons != rx_cons_sb);
10257}
10258
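The extra increment in bnx2x_has_rx_work() steps the status-block index past the last slot of a completion-queue page; the driver's ring macros suggest that slot is reserved for a next-page pointer rather than a real completion, so it must never be compared against rx_comp_cons directly. A sketch of the same wrap step (the helper name is hypothetical, and the rationale is an inference from the ring layout):

	/* illustrative: advance an RCQ index, skipping the reserved
	 * last descriptor of each page */
	static inline u16 example_rcq_advance(u16 idx)
	{
		idx++;
		if ((idx & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
			idx++;
		return idx;
	}
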
10259/*
10260 * net_device service functions
10261 */
10262
10263static int bnx2x_poll(struct napi_struct *napi, int budget)
10264{
10265 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10266 napi);
10267 struct bnx2x *bp = fp->bp;
10268 int work_done = 0;
10269
10270#ifdef BNX2X_STOP_ON_ERROR
10271 if (unlikely(bp->panic))
10272 goto poll_panic;
10273#endif
10274
10275 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10276 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10277 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10278
10279 bnx2x_update_fpsb_idx(fp);
10280
10281 if (bnx2x_has_tx_work(fp))
10282 bnx2x_tx_int(fp);
10283
10284 if (bnx2x_has_rx_work(fp)) {
10285 work_done = bnx2x_rx_int(fp, budget);
10286
10287 /* must not complete if we consumed full budget */
10288 if (work_done >= budget)
10289 goto poll_again;
10290 }
10291
10292 /* BNX2X_HAS_WORK() reads the status block, thus we need to
10293 * ensure that status block indices have been actually read
10294 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
10295 * so that we won't write the "newer" value of the status block to IGU
10296 * (if there was a DMA right after BNX2X_HAS_WORK and
10297 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10298 * may be postponed to right before bnx2x_ack_sb). In this case
10299 * there will never be another interrupt until there is another update
10300 * of the status block, while there is still unhandled work.
10301 */
10302 rmb();
10303
10304 if (!BNX2X_HAS_WORK(fp)) {
10305#ifdef BNX2X_STOP_ON_ERROR
10306poll_panic:
10307#endif
10308 napi_complete(napi);
10309
10310 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10311 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10312 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10313 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10314 }
10315
10316poll_again:
10317 return work_done;
10318}
10319
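The long comment before the rmb() compresses a subtle protocol; restated compactly (a paraphrase of the driver's own reasoning, not new behavior):

	/*
	 * Ordering relied on by bnx2x_poll():
	 *
	 *	bnx2x_update_fpsb_idx(fp);	read status-block indices
	 *	...process tx/rx work...
	 *	rmb();				index reads must complete here
	 *	if (!BNX2X_HAS_WORK(fp))	re-check using those indices
	 *		napi_complete() + ack;	no status-block update is lost
	 */
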
10320
10321/* we split the first BD into headers and data BDs
10322 * to ease the pain of our fellow microcode engineers
10323 * we use one mapping for both BDs
10324 * So far this has only been observed to happen
10325 * in Other Operating Systems(TM)
10326 */
10327static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10328 struct bnx2x_fastpath *fp,
10329 struct eth_tx_bd **tx_bd, u16 hlen,
10330 u16 bd_prod, int nbd)
10331{
10332 struct eth_tx_bd *h_tx_bd = *tx_bd;
10333 struct eth_tx_bd *d_tx_bd;
10334 dma_addr_t mapping;
10335 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10336
10337 /* first fix first BD */
10338 h_tx_bd->nbd = cpu_to_le16(nbd);
10339 h_tx_bd->nbytes = cpu_to_le16(hlen);
10340
10341 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10342 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10343 h_tx_bd->addr_lo, h_tx_bd->nbd);
10344
10345 /* now get a new data BD
10346 * (after the pbd) and fill it */
10347 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10348 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10349
10350 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10351 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10352
10353 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10354 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10355 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10356 d_tx_bd->vlan = 0;
10357 /* this marks the BD as one that has no individual mapping
10358 * the FW ignores this flag in a BD not marked start
10359 */
10360 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10361 DP(NETIF_MSG_TX_QUEUED,
10362 "TSO split data size is %d (%x:%x)\n",
10363 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10364
10365 /* update tx_bd for marking the last BD flag */
10366 *tx_bd = d_tx_bd;
10367
10368 return bd_prod;
10369}
10370
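As a worked example of the split above (sizes are hypothetical): for a TSO packet whose first BD mapped 1514 bytes with hlen = 66 header bytes, the header BD is shrunk to nbytes = 66 and a new data BD of old_len - hlen = 1448 bytes is pointed hlen bytes into the same DMA mapping, so no second mapping call is needed:

	/*
	 * before:  [BD0: 1514 bytes @ mapping]
	 * after:   [BD0:   66 bytes @ mapping]
	 *          [BD1: 1448 bytes @ mapping + 66, ETH_TX_BD_FLAGS_SW_LSO]
	 */
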
10371static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10372{
10373 if (fix > 0)
10374 csum = (u16) ~csum_fold(csum_sub(csum,
10375 csum_partial(t_header - fix, fix, 0)));
10376
10377 else if (fix < 0)
10378 csum = (u16) ~csum_fold(csum_add(csum,
10379 csum_partial(t_header, -fix, 0)));
10380
10381 return swab16(csum);
10382}
10383
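bnx2x_csum_fix() compensates for the stack having started its partial checksum fix bytes away from the transport header: a positive fix means the running sum covers extra bytes before the header, which are subtracted out, while a negative fix adds missing bytes back in. The arithmetic, restated (the byte-swap matching the parsing BD's expected order is an inference from its use with pbd->tcp_pseudo_csum):

	/*
	 * fix > 0:  csum' = ~csum_fold(csum - csum_partial(t_header - fix, fix, 0))
	 * fix < 0:  csum' = ~csum_fold(csum + csum_partial(t_header, -fix, 0))
	 * returned value: swab16(csum')
	 */
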
10384static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10385{
10386 u32 rc;
10387
10388 if (skb->ip_summed != CHECKSUM_PARTIAL)
10389 rc = XMIT_PLAIN;
10390
10391 else {
10392 if (skb->protocol == htons(ETH_P_IPV6)) {
10393 rc = XMIT_CSUM_V6;
10394 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10395 rc |= XMIT_CSUM_TCP;
10396
10397 } else {
10398 rc = XMIT_CSUM_V4;
10399 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10400 rc |= XMIT_CSUM_TCP;
10401 }
10402 }
10403
10404 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10405 rc |= XMIT_GSO_V4;
10406
10407 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10408 rc |= XMIT_GSO_V6;
10409
10410 return rc;
10411}
10412
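The value built above is a flag mask; some example classifications, derived directly from the branches of bnx2x_xmit_type():

	/*
	 * CHECKSUM_PARTIAL IPv4 TCP with TSO:
	 *	XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4
	 * CHECKSUM_PARTIAL IPv6, non-TCP next header:
	 *	XMIT_CSUM_V6
	 * anything without CHECKSUM_PARTIAL:
	 *	XMIT_PLAIN
	 */
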
10413#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10414/* check if packet requires linearization (packet is too fragmented)
10415 no need to check fragmentation if page size > 8K (there will be no
10416 violation to FW restrictions) */
10417static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10418 u32 xmit_type)
10419{
10420 int to_copy = 0;
10421 int hlen = 0;
10422 int first_bd_sz = 0;
10423
10424 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10425 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10426
10427 if (xmit_type & XMIT_GSO) {
10428 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10429 /* Check if LSO packet needs to be copied:
10430 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10431 int wnd_size = MAX_FETCH_BD - 3;
10432 /* Number of windows to check */
10433 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10434 int wnd_idx = 0;
10435 int frag_idx = 0;
10436 u32 wnd_sum = 0;
10437
10438 /* Headers length */
10439 hlen = (int)(skb_transport_header(skb) - skb->data) +
10440 tcp_hdrlen(skb);
10441
10442 /* Amount of data (w/o headers) on linear part of SKB*/
10443 first_bd_sz = skb_headlen(skb) - hlen;
10444
10445 wnd_sum = first_bd_sz;
10446
10447 /* Calculate the first sum - it's special */
10448 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10449 wnd_sum +=
10450 skb_shinfo(skb)->frags[frag_idx].size;
10451
10452 /* If there was data on linear skb data - check it */
10453 if (first_bd_sz > 0) {
10454 if (unlikely(wnd_sum < lso_mss)) {
10455 to_copy = 1;
10456 goto exit_lbl;
10457 }
10458
10459 wnd_sum -= first_bd_sz;
10460 }
10461
10462 /* Others are easier: run through the frag list and
10463 check all windows */
10464 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10465 wnd_sum +=
10466 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10467
10468 if (unlikely(wnd_sum < lso_mss)) {
10469 to_copy = 1;
10470 break;
10471 }
10472 wnd_sum -=
10473 skb_shinfo(skb)->frags[wnd_idx].size;
10474 }
10475 } else {
10476 /* in non-LSO too fragmented packet should always
10477 be linearized */
10478 to_copy = 1;
10479 }
10480 }
10481
10482exit_lbl:
10483 if (unlikely(to_copy))
10484 DP(NETIF_MSG_TX_QUEUED,
10485 "Linearization IS REQUIRED for %s packet. "
10486 "num_frags %d hlen %d first_bd_sz %d\n",
10487 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10488 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10489
10490 return to_copy;
10491}
10492#endif
10493
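The loop in bnx2x_pkt_req_lin() maintains a sliding-window sum so the check stays O(nr_frags); the invariant it enforces for the firmware can be restated as:

	/*
	 * For every window w of wnd_size consecutive BDs
	 * (linear data first, then the frags):
	 *	sum of their sizes >= lso_mss
	 * If any window falls short, the skb must be linearized.
	 * Each step adds the frag entering the window and subtracts
	 * the one leaving it.
	 */
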
10494/* called with netif_tx_lock
10495 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10496 * netif_wake_queue()
10497 */
10498static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10499{
10500 struct bnx2x *bp = netdev_priv(dev);
10501 struct bnx2x_fastpath *fp;
10502 struct netdev_queue *txq;
10503 struct sw_tx_bd *tx_buf;
10504 struct eth_tx_bd *tx_bd;
10505 struct eth_tx_parse_bd *pbd = NULL;
10506 u16 pkt_prod, bd_prod;
10507 int nbd, fp_index;
10508 dma_addr_t mapping;
10509 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10510 int vlan_off = (bp->e1hov ? 4 : 0);
10511 int i;
10512 u8 hlen = 0;
10513
10514#ifdef BNX2X_STOP_ON_ERROR
10515 if (unlikely(bp->panic))
10516 return NETDEV_TX_BUSY;
10517#endif
10518
10519 fp_index = skb_get_queue_mapping(skb);
10520 txq = netdev_get_tx_queue(dev, fp_index);
10521
10522 fp = &bp->fp[fp_index];
10523
10524 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10525 fp->eth_q_stats.driver_xoff++;
10526 netif_tx_stop_queue(txq);
10527 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10528 return NETDEV_TX_BUSY;
10529 }
10530
10531 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10532 " gso type %x xmit_type %x\n",
10533 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10534 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10535
10536#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10537 /* First, check if we need to linearize the skb (due to FW
10538 restrictions). No need to check fragmentation if page size > 8K
10539 (there will be no violation to FW restrictions) */
10540 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10541 /* Statistics of linearization */
10542 bp->lin_cnt++;
10543 if (skb_linearize(skb) != 0) {
10544 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10545 "silently dropping this SKB\n");
10546 dev_kfree_skb_any(skb);
10547 return NETDEV_TX_OK;
10548 }
10549 }
10550#endif
10551
10552 /*
10553 Please read carefully. First we use one BD which we mark as start,
10554 then for TSO or xsum we have a parsing info BD,
10555 and only then we have the rest of the TSO BDs.
10556 (don't forget to mark the last one as last,
10557 and to unmap only AFTER you write to the BD ...)
10558 And above all, all pbd sizes are in words - NOT DWORDS!
10559 */
10560
10561 pkt_prod = fp->tx_pkt_prod++;
10562 bd_prod = TX_BD(fp->tx_bd_prod);
10563
10564 /* get a tx_buf and first BD */
10565 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10566 tx_bd = &fp->tx_desc_ring[bd_prod];
10567
10568 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10569 tx_bd->general_data = (UNICAST_ADDRESS <<
10570 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10571 /* header nbd */
10572 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10573
10574 /* remember the first BD of the packet */
10575 tx_buf->first_bd = fp->tx_bd_prod;
10576 tx_buf->skb = skb;
10577
10578 DP(NETIF_MSG_TX_QUEUED,
10579 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10580 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10581
10582#ifdef BCM_VLAN
10583 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10584 (bp->flags & HW_VLAN_TX_FLAG)) {
10585 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10586 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10587 vlan_off += 4;
10588 } else
10589#endif
10590 tx_bd->vlan = cpu_to_le16(pkt_prod);
10591
10592 if (xmit_type) {
10593 /* turn on parsing and get a BD */
10594 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10595 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10596
10597 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10598 }
10599
10600 if (xmit_type & XMIT_CSUM) {
10601 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10602
10603 /* for now NS flag is not used in Linux */
10604 pbd->global_data =
10605 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10606 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10607
10608 pbd->ip_hlen = (skb_transport_header(skb) -
10609 skb_network_header(skb)) / 2;
10610
10611 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10612
10613 pbd->total_hlen = cpu_to_le16(hlen);
10614 hlen = hlen*2 - vlan_off;
10615
10616 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10617
10618 if (xmit_type & XMIT_CSUM_V4)
10619 tx_bd->bd_flags.as_bitfield |=
10620 ETH_TX_BD_FLAGS_IP_CSUM;
10621 else
10622 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10623
10624 if (xmit_type & XMIT_CSUM_TCP) {
10625 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10626
10627 } else {
10628 s8 fix = SKB_CS_OFF(skb); /* signed! */
10629
10630 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10631 pbd->cs_offset = fix / 2;
10632
10633 DP(NETIF_MSG_TX_QUEUED,
10634 "hlen %d offset %d fix %d csum before fix %x\n",
10635 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10636 SKB_CS(skb));
10637
10638 /* HW bug: fixup the CSUM */
10639 pbd->tcp_pseudo_csum =
10640 bnx2x_csum_fix(skb_transport_header(skb),
10641 SKB_CS(skb), fix);
10642
10643 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10644 pbd->tcp_pseudo_csum);
10645 }
10646 }
10647
10648 mapping = pci_map_single(bp->pdev, skb->data,
10649 skb_headlen(skb), PCI_DMA_TODEVICE);
10650
10651 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10652 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10653 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10654 tx_bd->nbd = cpu_to_le16(nbd);
10655 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10656
10657 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10658 " nbytes %d flags %x vlan %x\n",
10659 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10660 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10661 le16_to_cpu(tx_bd->vlan));
10662
10663 if (xmit_type & XMIT_GSO) {
10664
10665 DP(NETIF_MSG_TX_QUEUED,
10666 "TSO packet len %d hlen %d total len %d tso size %d\n",
10667 skb->len, hlen, skb_headlen(skb),
10668 skb_shinfo(skb)->gso_size);
10669
10670 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10671
10672 if (unlikely(skb_headlen(skb) > hlen))
10673 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10674 bd_prod, ++nbd);
10675
10676 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10677 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10678 pbd->tcp_flags = pbd_tcp_flags(skb);
10679
10680 if (xmit_type & XMIT_GSO_V4) {
10681 pbd->ip_id = swab16(ip_hdr(skb)->id);
10682 pbd->tcp_pseudo_csum =
10683 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10684 ip_hdr(skb)->daddr,
10685 0, IPPROTO_TCP, 0));
10686
10687 } else
10688 pbd->tcp_pseudo_csum =
10689 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10690 &ipv6_hdr(skb)->daddr,
10691 0, IPPROTO_TCP, 0));
10692
10693 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10694 }
10695
10696 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10697 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10698
10699 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10700 tx_bd = &fp->tx_desc_ring[bd_prod];
10701
10702 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10703 frag->size, PCI_DMA_TODEVICE);
10704
10705 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10706 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10707 tx_bd->nbytes = cpu_to_le16(frag->size);
10708 tx_bd->vlan = cpu_to_le16(pkt_prod);
10709 tx_bd->bd_flags.as_bitfield = 0;
10710
10711 DP(NETIF_MSG_TX_QUEUED,
10712 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10713 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10714 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10715 }
10716
10717 /* now at last mark the BD as the last BD */
10718 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10719
10720 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10721 tx_bd, tx_bd->bd_flags.as_bitfield);
10722
10723 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10724
10725 /* now send a tx doorbell, counting the next BD
10726 * if the packet contains or ends with it
10727 */
10728 if (TX_BD_POFF(bd_prod) < nbd)
10729 nbd++;
10730
10731 if (pbd)
10732 DP(NETIF_MSG_TX_QUEUED,
10733 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10734 " tcp_flags %x xsum %x seq %u hlen %u\n",
10735 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10736 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10737 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10738
10739 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10740
10741 /*
10742 * Make sure that the BD data is updated before updating the producer
10743 * since FW might read the BD right after the producer is updated.
10744 * This is only applicable for weak-ordered memory model archs such
10745 * as IA-64. The following barrier is also mandatory since FW will
10746 * assume packets must have BDs.
10747 */
10748 wmb();
10749
10750 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10751 mb(); /* FW restriction: must not reorder writing nbd and packets */
10752 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10753 DOORBELL(bp, fp->index, 0);
10754
10755 mmiowb();
10756
10757 fp->tx_bd_prod += nbd;
10758
10759 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10760 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10761 if we put Tx into XOFF state. */
10762 smp_mb();
10763 netif_tx_stop_queue(txq);
10764 fp->eth_q_stats.driver_xoff++;
10765 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10766 netif_tx_wake_queue(txq);
10767 }
10768 fp->tx_pkt++;
10769
10770 return NETDEV_TX_OK;
10771}
10772
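A note on the BD arithmetic in bnx2x_start_xmit(): nbd starts at nr_frags plus one BD for the linear data, plus one more when a parsing BD is used, and bnx2x_tx_split() can add another. A hypothetical worked count:

	/*
	 * TSO skb, 3 frags, header/data split performed:
	 *	start(header) BD + parsing BD + split data BD + 3 frag BDs = 6
	 * hence the queue is stopped while fewer than MAX_SKB_FRAGS + 3
	 * BDs remain free.
	 */
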
10773/* called with rtnl_lock */
10774static int bnx2x_open(struct net_device *dev)
10775{
10776 struct bnx2x *bp = netdev_priv(dev);
10777
10778 netif_carrier_off(dev);
10779
10780 bnx2x_set_power_state(bp, PCI_D0);
10781
10782 return bnx2x_nic_load(bp, LOAD_OPEN);
10783}
10784
10785/* called with rtnl_lock */
10786static int bnx2x_close(struct net_device *dev)
10787{
10788 struct bnx2x *bp = netdev_priv(dev);
10789
10790 /* Unload the driver, release IRQs */
10791 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10792 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10793 if (!CHIP_REV_IS_SLOW(bp))
10794 bnx2x_set_power_state(bp, PCI_D3hot);
10795
10796 return 0;
10797}
10798
10799/* called with netif_tx_lock from dev_mcast.c */
10800static void bnx2x_set_rx_mode(struct net_device *dev)
10801{
10802 struct bnx2x *bp = netdev_priv(dev);
10803 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10804 int port = BP_PORT(bp);
10805
10806 if (bp->state != BNX2X_STATE_OPEN) {
10807 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10808 return;
10809 }
10810
10811 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10812
10813 if (dev->flags & IFF_PROMISC)
10814 rx_mode = BNX2X_RX_MODE_PROMISC;
10815
10816 else if ((dev->flags & IFF_ALLMULTI) ||
10817 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10818 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10819
10820 else { /* some multicasts */
10821 if (CHIP_IS_E1(bp)) {
10822 int i, old, offset;
10823 struct dev_mc_list *mclist;
10824 struct mac_configuration_cmd *config =
10825 bnx2x_sp(bp, mcast_config);
10826
10827 for (i = 0, mclist = dev->mc_list;
10828 mclist && (i < dev->mc_count);
10829 i++, mclist = mclist->next) {
10830
10831 config->config_table[i].
10832 cam_entry.msb_mac_addr =
10833 swab16(*(u16 *)&mclist->dmi_addr[0]);
10834 config->config_table[i].
10835 cam_entry.middle_mac_addr =
10836 swab16(*(u16 *)&mclist->dmi_addr[2]);
10837 config->config_table[i].
10838 cam_entry.lsb_mac_addr =
10839 swab16(*(u16 *)&mclist->dmi_addr[4]);
10840 config->config_table[i].cam_entry.flags =
10841 cpu_to_le16(port);
10842 config->config_table[i].
10843 target_table_entry.flags = 0;
10844 config->config_table[i].
10845 target_table_entry.client_id = 0;
10846 config->config_table[i].
10847 target_table_entry.vlan_id = 0;
10848
10849 DP(NETIF_MSG_IFUP,
10850 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10851 config->config_table[i].
10852 cam_entry.msb_mac_addr,
10853 config->config_table[i].
10854 cam_entry.middle_mac_addr,
10855 config->config_table[i].
10856 cam_entry.lsb_mac_addr);
10857 }
10858 old = config->hdr.length;
10859 if (old > i) {
10860 for (; i < old; i++) {
10861 if (CAM_IS_INVALID(config->
10862 config_table[i])) {
10863 /* already invalidated */
10864 break;
10865 }
10866 /* invalidate */
10867 CAM_INVALIDATE(config->
10868 config_table[i]);
10869 }
10870 }
10871
10872 if (CHIP_REV_IS_SLOW(bp))
10873 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10874 else
10875 offset = BNX2X_MAX_MULTICAST*(1 + port);
10876
10877 config->hdr.length = i;
10878 config->hdr.offset = offset;
10879 config->hdr.client_id = bp->fp->cl_id;
10880 config->hdr.reserved1 = 0;
10881
10882 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10883 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10884 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10885 0);
10886 } else { /* E1H */
10887 /* Accept one or more multicasts */
10888 struct dev_mc_list *mclist;
10889 u32 mc_filter[MC_HASH_SIZE];
10890 u32 crc, bit, regidx;
10891 int i;
10892
10893 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10894
10895 for (i = 0, mclist = dev->mc_list;
10896 mclist && (i < dev->mc_count);
10897 i++, mclist = mclist->next) {
10898
10899 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10900 mclist->dmi_addr);
10901
10902 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10903 bit = (crc >> 24) & 0xff;
10904 regidx = bit >> 5;
10905 bit &= 0x1f;
10906 mc_filter[regidx] |= (1 << bit);
10907 }
10908
10909 for (i = 0; i < MC_HASH_SIZE; i++)
10910 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10911 mc_filter[i]);
10912 }
10913 }
10914
10915 bp->rx_mode = rx_mode;
10916 bnx2x_set_storm_rx_mode(bp);
10917}
10918
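The E1H branch above builds a 256-bit multicast hash filter: the top byte of the little-endian CRC32C of each MAC picks one of 256 bits, spread over eight 32-bit MC_HASH registers. The driver's arithmetic, restated as a standalone sketch (the helper name is hypothetical):

	/* illustrative: map a multicast MAC to (register index, bit) */
	static inline void example_mc_hash_pos(const u8 *mac,
					       u32 *regidx, u32 *bit)
	{
		u32 b = (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;

		*regidx = b >> 5;	/* one of MC_HASH_SIZE registers */
		*bit = b & 0x1f;	/* one of 32 bits per register */
	}
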
10919/* called with rtnl_lock */
10920static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10921{
10922 struct sockaddr *addr = p;
10923 struct bnx2x *bp = netdev_priv(dev);
10924
10925 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10926 return -EINVAL;
10927
10928 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10929 if (netif_running(dev)) {
10930 if (CHIP_IS_E1(bp))
10931 bnx2x_set_mac_addr_e1(bp, 1);
10932 else
10933 bnx2x_set_mac_addr_e1h(bp, 1);
10934 }
10935
10936 return 0;
10937}
10938
10939/* called with rtnl_lock */
10940static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10941{
10942 struct mii_ioctl_data *data = if_mii(ifr);
10943 struct bnx2x *bp = netdev_priv(dev);
10944 int port = BP_PORT(bp);
10945 int err;
10946
10947 switch (cmd) {
10948 case SIOCGMIIPHY:
10949 data->phy_id = bp->port.phy_addr;
10950
10951 /* fallthrough */
10952
10953 case SIOCGMIIREG: {
10954 u16 mii_regval;
10955
10956 if (!netif_running(dev))
10957 return -EAGAIN;
10958
10959 mutex_lock(&bp->port.phy_mutex);
10960 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10961 DEFAULT_PHY_DEV_ADDR,
10962 (data->reg_num & 0x1f), &mii_regval);
10963 data->val_out = mii_regval;
10964 mutex_unlock(&bp->port.phy_mutex);
10965 return err;
10966 }
10967
10968 case SIOCSMIIREG:
10969 if (!capable(CAP_NET_ADMIN))
10970 return -EPERM;
10971
10972 if (!netif_running(dev))
10973 return -EAGAIN;
10974
10975 mutex_lock(&bp->port.phy_mutex);
10976 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10977 DEFAULT_PHY_DEV_ADDR,
10978 (data->reg_num & 0x1f), data->val_in);
10979 mutex_unlock(&bp->port.phy_mutex);
10980 return err;
10981
10982 default:
10983 /* do nothing */
10984 break;
10985 }
10986
10987 return -EOPNOTSUPP;
10988}
10989
10990/* called with rtnl_lock */
10991static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10992{
10993 struct bnx2x *bp = netdev_priv(dev);
10994 int rc = 0;
10995
10996 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10997 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10998 return -EINVAL;
10999
11000 /* This does not race with packet allocation
11001 * because the actual alloc size is
11002 * only updated as part of load
11003 */
11004 dev->mtu = new_mtu;
11005
11006 if (netif_running(dev)) {
11007 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11008 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11009 }
11010
11011 return rc;
11012}
11013
11014static void bnx2x_tx_timeout(struct net_device *dev)
11015{
11016 struct bnx2x *bp = netdev_priv(dev);
11017
11018#ifdef BNX2X_STOP_ON_ERROR
11019 if (!bp->panic)
11020 bnx2x_panic();
11021#endif
11022 /* This allows the netif to be shutdown gracefully before resetting */
11023 schedule_work(&bp->reset_task);
11024}
11025
11026#ifdef BCM_VLAN
11027/* called with rtnl_lock */
11028static void bnx2x_vlan_rx_register(struct net_device *dev,
11029 struct vlan_group *vlgrp)
11030{
11031 struct bnx2x *bp = netdev_priv(dev);
11032
11033 bp->vlgrp = vlgrp;
11034
11035 /* Set flags according to the required capabilities */
11036 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11037
11038 if (dev->features & NETIF_F_HW_VLAN_TX)
11039 bp->flags |= HW_VLAN_TX_FLAG;
11040
11041 if (dev->features & NETIF_F_HW_VLAN_RX)
11042 bp->flags |= HW_VLAN_RX_FLAG;
11043
11044 if (netif_running(dev))
11045 bnx2x_set_client_config(bp);
11046}
11047
11048#endif
11049
11050#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11051static void poll_bnx2x(struct net_device *dev)
11052{
11053 struct bnx2x *bp = netdev_priv(dev);
11054
11055 disable_irq(bp->pdev->irq);
11056 bnx2x_interrupt(bp->pdev->irq, dev);
11057 enable_irq(bp->pdev->irq);
11058}
11059#endif
11060
11061static const struct net_device_ops bnx2x_netdev_ops = {
11062 .ndo_open = bnx2x_open,
11063 .ndo_stop = bnx2x_close,
11064 .ndo_start_xmit = bnx2x_start_xmit,
11065 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11066 .ndo_set_mac_address = bnx2x_change_mac_addr,
11067 .ndo_validate_addr = eth_validate_addr,
11068 .ndo_do_ioctl = bnx2x_ioctl,
11069 .ndo_change_mtu = bnx2x_change_mtu,
11070 .ndo_tx_timeout = bnx2x_tx_timeout,
11071#ifdef BCM_VLAN
11072 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11073#endif
11074#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11075 .ndo_poll_controller = poll_bnx2x,
11076#endif
11077};
11078
11079static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11080 struct net_device *dev)
11081{
11082 struct bnx2x *bp;
11083 int rc;
11084
11085 SET_NETDEV_DEV(dev, &pdev->dev);
11086 bp = netdev_priv(dev);
11087
11088 bp->dev = dev;
11089 bp->pdev = pdev;
11090 bp->flags = 0;
11091 bp->func = PCI_FUNC(pdev->devfn);
11092
11093 rc = pci_enable_device(pdev);
11094 if (rc) {
11095 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11096 goto err_out;
11097 }
11098
11099 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11100 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11101 " aborting\n");
11102 rc = -ENODEV;
11103 goto err_out_disable;
11104 }
11105
11106 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11107 printk(KERN_ERR PFX "Cannot find second PCI device"
11108 " base address, aborting\n");
11109 rc = -ENODEV;
11110 goto err_out_disable;
11111 }
11112
11113 if (atomic_read(&pdev->enable_cnt) == 1) {
11114 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11115 if (rc) {
11116 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11117 " aborting\n");
11118 goto err_out_disable;
11119 }
11120
11121 pci_set_master(pdev);
11122 pci_save_state(pdev);
11123 }
11124
11125 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11126 if (bp->pm_cap == 0) {
11127 printk(KERN_ERR PFX "Cannot find power management"
11128 " capability, aborting\n");
11129 rc = -EIO;
11130 goto err_out_release;
11131 }
11132
11133 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11134 if (bp->pcie_cap == 0) {
11135 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11136 " aborting\n");
11137 rc = -EIO;
11138 goto err_out_release;
11139 }
11140
11141 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11142 bp->flags |= USING_DAC_FLAG;
11143 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11144 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11145 " failed, aborting\n");
11146 rc = -EIO;
11147 goto err_out_release;
11148 }
11149
11150 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11151 printk(KERN_ERR PFX "System does not support DMA,"
11152 " aborting\n");
11153 rc = -EIO;
11154 goto err_out_release;
11155 }
11156
11157 dev->mem_start = pci_resource_start(pdev, 0);
11158 dev->base_addr = dev->mem_start;
11159 dev->mem_end = pci_resource_end(pdev, 0);
11160
11161 dev->irq = pdev->irq;
11162
11163 bp->regview = pci_ioremap_bar(pdev, 0);
11164 if (!bp->regview) {
11165 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11166 rc = -ENOMEM;
11167 goto err_out_release;
11168 }
11169
11170 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11171 min_t(u64, BNX2X_DB_SIZE,
11172 pci_resource_len(pdev, 2)));
11173 if (!bp->doorbells) {
11174 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11175 rc = -ENOMEM;
11176 goto err_out_unmap;
11177 }
11178
11179 bnx2x_set_power_state(bp, PCI_D0);
11180
11181 /* clean indirect addresses */
11182 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11183 PCICFG_VENDOR_ID_OFFSET);
11184 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11185 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11186 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11187 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11188
11189 dev->watchdog_timeo = TX_TIMEOUT;
11190
11191 dev->netdev_ops = &bnx2x_netdev_ops;
11192 dev->ethtool_ops = &bnx2x_ethtool_ops;
11193 dev->features |= NETIF_F_SG;
11194 dev->features |= NETIF_F_HW_CSUM;
11195 if (bp->flags & USING_DAC_FLAG)
11196 dev->features |= NETIF_F_HIGHDMA;
11197 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11198 dev->features |= NETIF_F_TSO6;
11199#ifdef BCM_VLAN
11200 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11201 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11202
11203 dev->vlan_features |= NETIF_F_SG;
11204 dev->vlan_features |= NETIF_F_HW_CSUM;
11205 if (bp->flags & USING_DAC_FLAG)
11206 dev->vlan_features |= NETIF_F_HIGHDMA;
11207 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11208 dev->vlan_features |= NETIF_F_TSO6;
11209#endif
11210
11211 return 0;
11212
11213err_out_unmap:
11214 if (bp->regview) {
11215 iounmap(bp->regview);
11216 bp->regview = NULL;
11217 }
11218 if (bp->doorbells) {
11219 iounmap(bp->doorbells);
11220 bp->doorbells = NULL;
11221 }
11222
11223err_out_release:
11224 if (atomic_read(&pdev->enable_cnt) == 1)
11225 pci_release_regions(pdev);
11226
11227err_out_disable:
11228 pci_disable_device(pdev);
11229 pci_set_drvdata(pdev, NULL);
11230
11231err_out:
11232 return rc;
11233}
11234
11235static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11236{
11237 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11238
11239 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11240 return val;
11241}
11242
11243/* return value of 1=2.5GHz 2=5GHz */
11244static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11245{
11246 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11247
11248 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11249 return val;
11250}
11251static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11252{
11253 struct bnx2x_fw_file_hdr *fw_hdr;
11254 struct bnx2x_fw_file_section *sections;
11255 u16 *ops_offsets;
11256 u32 offset, len, num_ops;
11257 int i;
11258 const struct firmware *firmware = bp->firmware;
11259 const u8 *fw_ver;
11260
11261 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11262 return -EINVAL;
11263
11264 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11265 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11266
11267 /* Make sure none of the offsets and sizes make us read beyond
11268 * the end of the firmware data */
11269 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11270 offset = be32_to_cpu(sections[i].offset);
11271 len = be32_to_cpu(sections[i].len);
11272 if (offset + len > firmware->size) {
11273 printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11274 return -EINVAL;
11275 }
11276 }
11277
11278 /* Likewise for the init_ops offsets */
11279 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11280 ops_offsets = (u16 *)(firmware->data + offset);
11281 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11282
11283 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11284 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11285 printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11286 return -EINVAL;
11287 }
11288 }
11289
11290 /* Check FW version */
11291 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11292 fw_ver = firmware->data + offset;
11293 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11294 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11295 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11296 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11297 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11298 " Should be %d.%d.%d.%d\n",
11299 fw_ver[0], fw_ver[1], fw_ver[2],
11300 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11301 BCM_5710_FW_MINOR_VERSION,
11302 BCM_5710_FW_REVISION_VERSION,
11303 BCM_5710_FW_ENGINEERING_VERSION);
11304 return -EINVAL;
11305 }
11306
11307 return 0;
11308}
11309
11310static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11311{
11312 u32 i;
11313 const __be32 *source = (const __be32*)_source;
11314 u32 *target = (u32*)_target;
11315
11316 for (i = 0; i < n/4; i++)
11317 target[i] = be32_to_cpu(source[i]);
11318}
11319
11320/*
11321 Ops array is stored in the following format:
11322 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11323 */
11324static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11325{
11326 u32 i, j, tmp;
11327 const __be32 *source = (const __be32*)_source;
11328 struct raw_op *target = (struct raw_op*)_target;
11329
11330 for (i = 0, j = 0; i < n/8; i++, j+=2) {
11331 tmp = be32_to_cpu(source[j]);
11332 target[i].op = (tmp >> 24) & 0xff;
11333 target[i].offset = tmp & 0xffffff;
11334 target[i].raw_data = be32_to_cpu(source[j+1]);
11335 }
11336}
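A worked decode of one 8-byte record in the format documented above (the values are illustrative):

	/*
	 * word0 = 0x1A001234, word1 = 0xCAFEBABE (after be32_to_cpu)
	 *	op       = word0 >> 24       = 0x1A
	 *	offset   = word0 & 0xffffff  = 0x001234
	 *	raw_data = word1             = 0xCAFEBABE
	 */
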
11337static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11338{
11339 u32 i;
11340 u16 *target = (u16*)_target;
11341 const __be16 *source = (const __be16*)_source;
11342
11343 for (i = 0; i < n/2; i++)
11344 target[i] = be16_to_cpu(source[i]);
11345}
11346
11347#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11348 do { \
11349 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11350 bp->arr = kmalloc(len, GFP_KERNEL); \
11351 if (!bp->arr) { \
11352 printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11353 goto lbl; \
11354 } \
11355 func(bp->firmware->data + \
11356 be32_to_cpu(fw_hdr->arr.offset), \
11357 (u8*)bp->arr, len); \
11358 } while (0)
11359
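For clarity, the expansion of the macro for the init_data case used below is roughly (a sketch; the printk in the failure path is elided):

	/*
	 * BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n)
	 * ==>
	 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
	 *	bp->init_data = kmalloc(len, GFP_KERNEL);
	 *	if (!bp->init_data)
	 *		goto request_firmware_exit;
	 *	be32_to_cpu_n(bp->firmware->data +
	 *		      be32_to_cpu(fw_hdr->init_data.offset),
	 *		      (u8 *)bp->init_data, len);
	 */
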
11360
11361static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11362{
11363 char fw_file_name[40] = {0};
11364 int rc, offset;
11365 struct bnx2x_fw_file_hdr *fw_hdr;
11366
11367 /* Create a FW file name */
11368 if (CHIP_IS_E1(bp))
11369 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11370 else
11371 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11372
11373 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11374 BCM_5710_FW_MAJOR_VERSION,
11375 BCM_5710_FW_MINOR_VERSION,
11376 BCM_5710_FW_REVISION_VERSION,
11377 BCM_5710_FW_ENGINEERING_VERSION);
11378
11379 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11380
11381 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11382 if (rc) {
11383 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11384 goto request_firmware_exit;
11385 }
11386
11387 rc = bnx2x_check_firmware(bp);
11388 if (rc) {
11389 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11390 goto request_firmware_exit;
11391 }
11392
11393 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11394
11395 /* Initialize the pointers to the init arrays */
11396 /* Blob */
11397 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11398
11399 /* Opcodes */
11400 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11401
11402 /* Offsets */
11403 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11404
11405 /* STORMs firmware */
11406 bp->tsem_int_table_data = bp->firmware->data +
11407 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11408 bp->tsem_pram_data = bp->firmware->data +
11409 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11410 bp->usem_int_table_data = bp->firmware->data +
11411 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11412 bp->usem_pram_data = bp->firmware->data +
11413 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11414 bp->xsem_int_table_data = bp->firmware->data +
11415 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11416 bp->xsem_pram_data = bp->firmware->data +
11417 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11418 bp->csem_int_table_data = bp->firmware->data +
11419 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11420 bp->csem_pram_data = bp->firmware->data +
11421 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11422
11423 return 0;
11424init_offsets_alloc_err:
11425 kfree(bp->init_ops);
11426init_ops_alloc_err:
11427 kfree(bp->init_data);
11428request_firmware_exit:
11429 release_firmware(bp->firmware);
11430
11431 return rc;
11432}
11433
11434
11435
11436static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11437 const struct pci_device_id *ent)
11438{
11439 static int version_printed;
11440 struct net_device *dev = NULL;
11441 struct bnx2x *bp;
11442 int rc;
11443
11444 if (version_printed++ == 0)
11445 printk(KERN_INFO "%s", version);
11446
11447 /* dev zeroed in init_etherdev */
11448 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11449 if (!dev) {
11450 printk(KERN_ERR PFX "Cannot allocate net device\n");
11451 return -ENOMEM;
11452 }
11453
11454 bp = netdev_priv(dev);
11455 bp->msglevel = debug;
11456
11457 rc = bnx2x_init_dev(pdev, dev);
11458 if (rc < 0) {
11459 free_netdev(dev);
11460 return rc;
11461 }
11462
11463 pci_set_drvdata(pdev, dev);
11464
11465 rc = bnx2x_init_bp(bp);
11466 if (rc)
11467 goto init_one_exit;
11468
11469 /* Set init arrays */
11470 rc = bnx2x_init_firmware(bp, &pdev->dev);
11471 if (rc) {
11472 printk(KERN_ERR PFX "Error loading firmware\n");
11473 goto init_one_exit;
11474 }
11475
11476 rc = register_netdev(dev);
11477 if (rc) {
11478 dev_err(&pdev->dev, "Cannot register net device\n");
11479 goto init_one_exit;
11480 }
11481
11482 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11483 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11484 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11485 bnx2x_get_pcie_width(bp),
11486 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11487 dev->base_addr, bp->pdev->irq);
11488 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11489
11490 return 0;
11491
11492init_one_exit:
11493 if (bp->regview)
11494 iounmap(bp->regview);
11495
11496 if (bp->doorbells)
11497 iounmap(bp->doorbells);
11498
11499 free_netdev(dev);
11500
11501 if (atomic_read(&pdev->enable_cnt) == 1)
11502 pci_release_regions(pdev);
11503
11504 pci_disable_device(pdev);
11505 pci_set_drvdata(pdev, NULL);
11506
11507 return rc;
11508}
11509
11510static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11511{
11512 struct net_device *dev = pci_get_drvdata(pdev);
11513 struct bnx2x *bp;
11514
11515 if (!dev) {
11516 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11517 return;
11518 }
11519 bp = netdev_priv(dev);
11520
11521 unregister_netdev(dev);
11522
11523 kfree(bp->init_ops_offsets);
11524 kfree(bp->init_ops);
11525 kfree(bp->init_data);
11526 release_firmware(bp->firmware);
11527
11528 if (bp->regview)
11529 iounmap(bp->regview);
11530
11531 if (bp->doorbells)
11532 iounmap(bp->doorbells);
11533
11534 free_netdev(dev);
11535
11536 if (atomic_read(&pdev->enable_cnt) == 1)
11537 pci_release_regions(pdev);
11538
11539 pci_disable_device(pdev);
11540 pci_set_drvdata(pdev, NULL);
11541}
11542
11543static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11544{
11545 struct net_device *dev = pci_get_drvdata(pdev);
11546 struct bnx2x *bp;
11547
11548 if (!dev) {
11549 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11550 return -ENODEV;
11551 }
11552 bp = netdev_priv(dev);
11553
11554 rtnl_lock();
11555
11556 pci_save_state(pdev);
11557
11558 if (!netif_running(dev)) {
11559 rtnl_unlock();
11560 return 0;
11561 }
11562
11563 netif_device_detach(dev);
11564
11565 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11566
11567 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11568
11569 rtnl_unlock();
11570
11571 return 0;
11572}
11573
11574static int bnx2x_resume(struct pci_dev *pdev)
11575{
11576 struct net_device *dev = pci_get_drvdata(pdev);
11577 struct bnx2x *bp;
11578 int rc;
11579
11580 if (!dev) {
11581 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11582 return -ENODEV;
11583 }
11584 bp = netdev_priv(dev);
11585
11586 rtnl_lock();
11587
11588 pci_restore_state(pdev);
11589
11590 if (!netif_running(dev)) {
11591 rtnl_unlock();
11592 return 0;
11593 }
11594
11595 bnx2x_set_power_state(bp, PCI_D0);
11596 netif_device_attach(dev);
11597
11598 rc = bnx2x_nic_load(bp, LOAD_OPEN);
11599
11600 rtnl_unlock();
11601
11602 return rc;
11603}
11604
11605static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11606{
11607 int i;
11608
11609 bp->state = BNX2X_STATE_ERROR;
11610
11611 bp->rx_mode = BNX2X_RX_MODE_NONE;
11612
11613 bnx2x_netif_stop(bp, 0);
11614
11615 del_timer_sync(&bp->timer);
11616 bp->stats_state = STATS_STATE_DISABLED;
11617 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11618
11619 /* Release IRQs */
11620 bnx2x_free_irq(bp);
11621
11622 if (CHIP_IS_E1(bp)) {
11623 struct mac_configuration_cmd *config =
11624 bnx2x_sp(bp, mcast_config);
11625
11626 for (i = 0; i < config->hdr.length; i++)
11627 CAM_INVALIDATE(config->config_table[i]);
11628 }
11629
11630 /* Free SKBs, SGEs, TPA pool and driver internals */
11631 bnx2x_free_skbs(bp);
11632 for_each_rx_queue(bp, i)
11633 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11634 for_each_rx_queue(bp, i)
11635 netif_napi_del(&bnx2x_fp(bp, i, napi));
11636 bnx2x_free_mem(bp);
11637
11638 bp->state = BNX2X_STATE_CLOSED;
11639
11640 netif_carrier_off(bp->dev);
11641
11642 return 0;
11643}
11644
11645static void bnx2x_eeh_recover(struct bnx2x *bp)
11646{
11647 u32 val;
11648
11649 mutex_init(&bp->port.phy_mutex);
11650
11651 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11652 bp->link_params.shmem_base = bp->common.shmem_base;
11653 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11654
11655 if (!bp->common.shmem_base ||
11656 (bp->common.shmem_base < 0xA0000) ||
11657 (bp->common.shmem_base >= 0xC0000)) {
11658 BNX2X_DEV_INFO("MCP not active\n");
11659 bp->flags |= NO_MCP_FLAG;
11660 return;
11661 }
11662
11663 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11664 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11665 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11666 BNX2X_ERR("BAD MCP validity signature\n");
11667
11668 if (!BP_NOMCP(bp)) {
11669 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11670 & DRV_MSG_SEQ_NUMBER_MASK);
11671 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11672 }
11673}
11674
11675/**
11676 * bnx2x_io_error_detected - called when PCI error is detected
11677 * @pdev: Pointer to PCI device
11678 * @state: The current pci connection state
11679 *
11680 * This function is called after a PCI bus error affecting
11681 * this device has been detected.
11682 */
11683static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11684 pci_channel_state_t state)
11685{
11686 struct net_device *dev = pci_get_drvdata(pdev);
11687 struct bnx2x *bp = netdev_priv(dev);
11688
11689 rtnl_lock();
11690
11691 netif_device_detach(dev);
11692
11693 if (netif_running(dev))
11694 bnx2x_eeh_nic_unload(bp);
11695
11696 pci_disable_device(pdev);
11697
11698 rtnl_unlock();
11699
11700 /* Request a slot reset */
11701 return PCI_ERS_RESULT_NEED_RESET;
11702}
11703
11704/**
11705 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11706 * @pdev: Pointer to PCI device
11707 *
11708 * Restart the card from scratch, as if from a cold-boot.
11709 */
11710static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11711{
11712 struct net_device *dev = pci_get_drvdata(pdev);
11713 struct bnx2x *bp = netdev_priv(dev);
11714
11715 rtnl_lock();
11716
11717 if (pci_enable_device(pdev)) {
11718 dev_err(&pdev->dev,
11719 "Cannot re-enable PCI device after reset\n");
11720 rtnl_unlock();
11721 return PCI_ERS_RESULT_DISCONNECT;
11722 }
11723
11724 pci_set_master(pdev);
11725 pci_restore_state(pdev);
11726
11727 if (netif_running(dev))
11728 bnx2x_set_power_state(bp, PCI_D0);
11729
11730 rtnl_unlock();
11731
11732 return PCI_ERS_RESULT_RECOVERED;
11733}
11734
11735/**
11736 * bnx2x_io_resume - called when traffic can start flowing again
11737 * @pdev: Pointer to PCI device
11738 *
11739 * This callback is called when the error recovery driver tells us that
11740 * it's OK to resume normal operation.
11741 */
11742static void bnx2x_io_resume(struct pci_dev *pdev)
11743{
11744 struct net_device *dev = pci_get_drvdata(pdev);
11745 struct bnx2x *bp = netdev_priv(dev);
11746
11747 rtnl_lock();
11748
11749 bnx2x_eeh_recover(bp);
11750
11751 if (netif_running(dev))
11752 bnx2x_nic_load(bp, LOAD_NORMAL);
11753
11754 netif_device_attach(dev);
11755
11756 rtnl_unlock();
11757}
11758
11759static struct pci_error_handlers bnx2x_err_handler = {
11760 .error_detected = bnx2x_io_error_detected,
11761 .slot_reset = bnx2x_io_slot_reset,
11762 .resume = bnx2x_io_resume,
11763};
11764
11765static struct pci_driver bnx2x_pci_driver = {
11766 .name = DRV_MODULE_NAME,
11767 .id_table = bnx2x_pci_tbl,
11768 .probe = bnx2x_init_one,
11769 .remove = __devexit_p(bnx2x_remove_one),
11770 .suspend = bnx2x_suspend,
11771 .resume = bnx2x_resume,
11772 .err_handler = &bnx2x_err_handler,
11773};
11774
11775static int __init bnx2x_init(void)
11776{
11777 int ret;
11778
11779 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11780 if (bnx2x_wq == NULL) {
11781 printk(KERN_ERR PFX "Cannot create workqueue\n");
11782 return -ENOMEM;
11783 }
11784
11785 ret = pci_register_driver(&bnx2x_pci_driver);
11786 if (ret) {
11787 printk(KERN_ERR PFX "Cannot register driver\n");
11788 destroy_workqueue(bnx2x_wq);
11789 }
11790 return ret;
11791}
11792
11793static void __exit bnx2x_cleanup(void)
11794{
11795 pci_unregister_driver(&bnx2x_pci_driver);
11796
11797 destroy_workqueue(bnx2x_wq);
11798}
11799
11800module_init(bnx2x_init);
11801module_exit(bnx2x_cleanup);
11802
11803