/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.114-1"
#define DRV_MODULE_RELDATE	"2009/07/29"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

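/*
 * The probe code picks one of the prefixes above by chip family (E1 vs.
 * E1H) and builds the full firmware file name by appending the FW
 * version and a ".fw" suffix, i.e. something of the form
 * "bnx2x-e1-<fw version>.fw" (the exact version digits are illustrative
 * here and depend on the FW shipped with this release).
 */
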
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
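
/*
 * All of the above are module load-time parameters; an illustrative
 * invocation (the parameter values here are examples only):
 *
 *	modprobe bnx2x multi_mode=0 int_mode=1 debug=0x1
 */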

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
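
/*
 * Both indirect accessors above work the same way: program the GRC
 * address window in PCI config space (PCICFG_GRC_ADDRESS), move one
 * dword through PCICFG_GRC_DATA, and then park the window back at
 * PCICFG_VENDOR_ID_OFFSET so a later stray config cycle cannot hit a
 * live register.
 */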

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
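
/*
 * Completion protocol used above: the DMAE engine writes DMAE_COMP_VAL
 * into the slowpath wb_comp dword once the copy is done, so the caller
 * spins on *wb_comp (bounded by cnt) instead of taking an interrupt.
 * Before the engine is ready (early init), the transfer silently falls
 * back to indirect register accesses.
 */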

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
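
/*
 * The ack above is a single 32-bit write to the HC command register: it
 * reports the last processed status-block index back to the IGU and,
 * through the 'op' field (e.g. IGU_INT_ENABLE or IGU_INT_DISABLE, as
 * used by the ISRs below), simultaneously re-enables or masks further
 * interrupts for that status block.
 */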

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
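
/*
 * The returned bitmask encodes which indices moved: bit 0 for the
 * CSTORM (Tx completion) index and bit 1 for the USTORM (Rx) index, so
 * a zero result means the status block carries no new work for this
 * fastpath.
 */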

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
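
/*
 * Worked example for the arithmetic above, assuming (illustratively)
 * tx_ring_size == 1000 and NUM_TX_RINGS == 4: with prod == 510 and
 * cons == 500, used == 10 + 4 == 14 and 986 BDs are reported free.
 * Counting the "next-page" BDs as always-used keeps the xmit path from
 * ever handing out the ring's link elements.
 */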

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
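
/*
 * How the mask is consumed by bnx2x_update_sge_prod() above: each u64
 * element of sge_mask covers 64 SGEs, a cleared bit marks a page the
 * FW has used, and the producer only advances across elements that
 * have gone fully to zero, re-arming each one back to all-ones as it
 * goes.
 */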

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
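
/*
 * TPA bin lifecycle, in short: bnx2x_tpa_start() swaps an empty skb
 * into the BD ring and parks the partially filled one in
 * tpa_pool[queue]; the FW keeps aggregating segments into SGE pages;
 * bnx2x_tpa_stop() (above) fixes the IP checksum, attaches those pages
 * as frags via bnx2x_fill_frag_skb() and hands the aggregated packet
 * to the stack.
 */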

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW assumes
	 * BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
1717
1718static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1719{
555f6c78 1720 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1721 u16 status = bnx2x_ack_int(bp);
34f80b04 1722 u16 mask;
a2fbb9ea 1723
34f80b04 1724 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1725 if (unlikely(status == 0)) {
1726 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1727 return IRQ_NONE;
1728 }
f5372251 1729 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1730
34f80b04 1731 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1732 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1733 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1734 return IRQ_HANDLED;
1735 }
1736
3196a88a
EG
1737#ifdef BNX2X_STOP_ON_ERROR
1738 if (unlikely(bp->panic))
1739 return IRQ_HANDLED;
1740#endif
1741
34f80b04
EG
1742 mask = 0x2 << bp->fp[0].sb_id;
1743 if (status & mask) {
1744 struct bnx2x_fastpath *fp = &bp->fp[0];
1745
1746 prefetch(fp->rx_cons_sb);
1747 prefetch(fp->tx_cons_sb);
1748 prefetch(&fp->status_blk->c_status_block.status_block_index);
1749 prefetch(&fp->status_blk->u_status_block.status_block_index);
1750
288379f0 1751 napi_schedule(&bnx2x_fp(bp, 0, napi));
a2fbb9ea 1752
34f80b04 1753 status &= ~mask;
1754 }
1755
a2fbb9ea 1756
34f80b04 1757 if (unlikely(status & 0x1)) {
1cf167f2 1758 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1759
1760 status &= ~0x1;
1761 if (!status)
1762 return IRQ_HANDLED;
1763 }
1764
1765 if (status)
1766 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1767 status);
a2fbb9ea 1768
c18487ee 1769 return IRQ_HANDLED;
1770}
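/*
 * A standalone sketch of the INTA status decoding in bnx2x_interrupt() above,
 * under the layout visible in the code: bit 0 signals slow-path work and the
 * fastpath status block owns bit sb_id + 1 (mask = 0x2 << sb_id).
 */
#include <stdint.h>
#include <stdio.h>

static void ex_decode_status(uint16_t status, unsigned int sb_id)
{
	uint16_t fp_mask = 0x2 << sb_id;	/* fastpath SB bit */

	if (status & fp_mask) {
		printf("fastpath work on SB %u\n", sb_id);
		status &= ~fp_mask;		/* consume the bit */
	}
	if (status & 0x1) {
		printf("slowpath work\n");
		status &= ~0x1;
	}
	if (status)
		printf("unknown interrupt! (status 0x%x)\n", status);
}

int main(void)
{
	ex_decode_status(0x3, 0);	/* bit 1: fastpath SB 0, bit 0: slowpath */
	return 0;
}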
1771
c18487ee 1772/* end of fast path */
a2fbb9ea 1773
bb2a0f7a 1774static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1775
1776/* Link */
1777
1778/*
1779 * General service functions
1780 */
a2fbb9ea 1781
4a37fb66 1782static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1783{
1784 u32 lock_status;
1785 u32 resource_bit = (1 << resource);
1786 int func = BP_FUNC(bp);
1787 u32 hw_lock_control_reg;
c18487ee 1788 int cnt;
a2fbb9ea 1789
1790 /* Validating that the resource is within range */
1791 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1792 DP(NETIF_MSG_HW,
1793 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1794 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1795 return -EINVAL;
1796 }
a2fbb9ea 1797
1798 if (func <= 5) {
1799 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1800 } else {
1801 hw_lock_control_reg =
1802 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1803 }
1804
c18487ee 1805 /* Validating that the resource is not already taken */
4a37fb66 1806 lock_status = REG_RD(bp, hw_lock_control_reg);
1807 if (lock_status & resource_bit) {
1808 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1809 lock_status, resource_bit);
1810 return -EEXIST;
1811 }
a2fbb9ea 1812
 1813 /* Try for 5 seconds, every 5ms */
1814 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1815 /* Try to acquire the lock */
1816 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1817 lock_status = REG_RD(bp, hw_lock_control_reg);
1818 if (lock_status & resource_bit)
1819 return 0;
a2fbb9ea 1820
c18487ee 1821 msleep(5);
a2fbb9ea 1822 }
1823 DP(NETIF_MSG_HW, "Timeout\n");
1824 return -EAGAIN;
1825}
a2fbb9ea 1826
4a37fb66 1827static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1828{
1829 u32 lock_status;
1830 u32 resource_bit = (1 << resource);
1831 int func = BP_FUNC(bp);
1832 u32 hw_lock_control_reg;
a2fbb9ea 1833
1834 /* Validating that the resource is within range */
1835 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1836 DP(NETIF_MSG_HW,
1837 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1838 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1839 return -EINVAL;
1840 }
1841
1842 if (func <= 5) {
1843 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1844 } else {
1845 hw_lock_control_reg =
1846 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1847 }
1848
c18487ee 1849 /* Validating that the resource is currently taken */
4a37fb66 1850 lock_status = REG_RD(bp, hw_lock_control_reg);
1851 if (!(lock_status & resource_bit)) {
1852 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1853 lock_status, resource_bit);
1854 return -EFAULT;
1855 }
1856
4a37fb66 1857 REG_WR(bp, hw_lock_control_reg, resource_bit);
1858 return 0;
1859}
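/*
 * A standalone sketch of the acquire/release protocol implemented above:
 * refuse if the resource bit is already set, otherwise repeatedly request the
 * lock and read back to see whether we won it, giving up after a bounded
 * number of sleeps; release writes the bit back. The register is modeled by a
 * plain variable here; the driver uses REG_RD/REG_WR on the chip, where a
 * write to control_reg + 4 requests the bit.
 */
#include <stdint.h>
#include <errno.h>

static uint32_t ex_lock_reg;	/* models the HW lock control register */

static int ex_hw_lock_acquire(uint32_t resource_bit, int max_tries)
{
	int cnt;

	if (ex_lock_reg & resource_bit)
		return -EEXIST;			/* already taken */

	for (cnt = 0; cnt < max_tries; cnt++) {
		ex_lock_reg |= resource_bit;	/* request (always wins here) */
		if (ex_lock_reg & resource_bit)
			return 0;		/* read-back confirms ownership */
		/* driver: msleep(5) between attempts */
	}
	return -EAGAIN;
}

static void ex_hw_lock_release(uint32_t resource_bit)
{
	ex_lock_reg &= ~resource_bit;		/* give the lock back */
}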
1860
1861/* HW Lock for shared dual port PHYs */
4a37fb66 1862static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1863{
34f80b04 1864 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1865
1866 if (bp->port.need_hw_lock)
1867 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1868}
a2fbb9ea 1869
4a37fb66 1870static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1871{
1872 if (bp->port.need_hw_lock)
1873 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1874
34f80b04 1875 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1876}
a2fbb9ea 1877
1878int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1879{
1880 /* The GPIO should be swapped if swap register is set and active */
1881 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1882 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1883 int gpio_shift = gpio_num +
1884 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1885 u32 gpio_mask = (1 << gpio_shift);
1886 u32 gpio_reg;
1887 int value;
1888
1889 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1890 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1891 return -EINVAL;
1892 }
1893
1894 /* read GPIO value */
1895 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1896
1897 /* get the requested pin value */
1898 if ((gpio_reg & gpio_mask) == gpio_mask)
1899 value = 1;
1900 else
1901 value = 0;
1902
1903 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1904
1905 return value;
1906}
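/*
 * A standalone sketch of the port-swap arithmetic shared by the GPIO helpers
 * above: the effective port is XOR-flipped only when both the swap strap and
 * its override read non-zero, and the pin's bit position gains a fixed
 * per-port offset. The shift constant below is an illustrative stand-in for
 * MISC_REGISTERS_GPIO_PORT_SHIFT.
 */
#include <stdbool.h>

#define EX_GPIO_PORT_SHIFT 4	/* illustrative */

static int ex_gpio_shift(int gpio_num, int port, bool swap, bool override)
{
	int gpio_port = (swap && override) ^ port;	/* swap active? flip */

	return gpio_num + (gpio_port ? EX_GPIO_PORT_SHIFT : 0);
}
/* e.g. GPIO 1, port 1, no swap -> bit 5; with swap + override -> bit 1 */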
1907
17de50b7 1908int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1909{
1910 /* The GPIO should be swapped if swap register is set and active */
1911 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1912 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1913 int gpio_shift = gpio_num +
1914 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1915 u32 gpio_mask = (1 << gpio_shift);
1916 u32 gpio_reg;
a2fbb9ea 1917
1918 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1919 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1920 return -EINVAL;
1921 }
a2fbb9ea 1922
4a37fb66 1923 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1924 /* read GPIO and mask except the float bits */
1925 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1926
1927 switch (mode) {
1928 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1929 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1930 gpio_num, gpio_shift);
1931 /* clear FLOAT and set CLR */
1932 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1933 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1934 break;
a2fbb9ea 1935
1936 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1937 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1938 gpio_num, gpio_shift);
1939 /* clear FLOAT and set SET */
1940 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1941 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1942 break;
a2fbb9ea 1943
17de50b7 1944 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1945 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1946 gpio_num, gpio_shift);
1947 /* set FLOAT */
1948 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1949 break;
a2fbb9ea 1950
1951 default:
1952 break;
1953 }
1954
c18487ee 1955 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1956 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1957
c18487ee 1958 return 0;
1959}
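/*
 * A standalone sketch of the FLOAT/SET/CLR read-modify-write done above: each
 * pin has one bit in three fields of a single register; driving it clears its
 * FLOAT bit and raises exactly one of SET or CLR. Field positions below are
 * illustrative, not the chip's real offsets.
 */
#include <stdint.h>

#define EX_SET_POS    8		/* illustrative field positions */
#define EX_CLR_POS   16
#define EX_FLOAT_POS 24

static uint32_t ex_gpio_drive(uint32_t reg, uint32_t pin_mask, int high)
{
	reg &= ~(pin_mask << EX_FLOAT_POS);	/* stop floating the pin */
	if (high)
		reg |= (pin_mask << EX_SET_POS);	/* drive high */
	else
		reg |= (pin_mask << EX_CLR_POS);	/* drive low */
	return reg;
}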
1960
1961int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1962{
1963 /* The GPIO should be swapped if swap register is set and active */
1964 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1965 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1966 int gpio_shift = gpio_num +
1967 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1968 u32 gpio_mask = (1 << gpio_shift);
1969 u32 gpio_reg;
1970
1971 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1972 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1973 return -EINVAL;
1974 }
1975
1976 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1977 /* read GPIO int */
1978 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1979
1980 switch (mode) {
1981 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1982 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1983 "output low\n", gpio_num, gpio_shift);
1984 /* clear SET and set CLR */
1985 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1986 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1987 break;
1988
1989 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1990 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1991 "output high\n", gpio_num, gpio_shift);
1992 /* clear CLR and set SET */
1993 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1994 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1995 break;
1996
1997 default:
1998 break;
1999 }
2000
2001 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2002 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2003
2004 return 0;
2005}
2006
c18487ee 2007static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2008{
2009 u32 spio_mask = (1 << spio_num);
2010 u32 spio_reg;
a2fbb9ea 2011
2012 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2013 (spio_num > MISC_REGISTERS_SPIO_7)) {
2014 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2015 return -EINVAL;
2016 }
2017
4a37fb66 2018 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2019 /* read SPIO and mask except the float bits */
2020 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2021
c18487ee 2022 switch (mode) {
6378c025 2023 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2024 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2025 /* clear FLOAT and set CLR */
2026 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2027 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2028 break;
a2fbb9ea 2029
6378c025 2030 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2031 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2032 /* clear FLOAT and set SET */
2033 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2034 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2035 break;
a2fbb9ea 2036
2037 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2038 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2039 /* set FLOAT */
2040 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2041 break;
a2fbb9ea 2042
2043 default:
2044 break;
2045 }
2046
c18487ee 2047 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2048 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2049
2050 return 0;
2051}
2052
c18487ee 2053static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2054{
2055 switch (bp->link_vars.ieee_fc &
2056 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2057 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2058 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2059 ADVERTISED_Pause);
2060 break;
356e2385 2061
c18487ee 2062 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2063 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2064 ADVERTISED_Pause);
2065 break;
356e2385 2066
c18487ee 2067 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2068 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2069 break;
356e2385 2070
c18487ee 2071 default:
34f80b04 2072 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2073 ADVERTISED_Pause);
2074 break;
2075 }
2076}
f1410647 2077
2078static void bnx2x_link_report(struct bnx2x *bp)
2079{
2080 if (bp->link_vars.link_up) {
2081 if (bp->state == BNX2X_STATE_OPEN)
2082 netif_carrier_on(bp->dev);
2083 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2084
c18487ee 2085 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2086
2087 if (bp->link_vars.duplex == DUPLEX_FULL)
2088 printk("full duplex");
2089 else
2090 printk("half duplex");
f1410647 2091
2092 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2093 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2094 printk(", receive ");
2095 if (bp->link_vars.flow_ctrl &
2096 BNX2X_FLOW_CTRL_TX)
2097 printk("& transmit ");
2098 } else {
2099 printk(", transmit ");
2100 }
2101 printk("flow control ON");
2102 }
2103 printk("\n");
f1410647 2104
2105 } else { /* link_down */
2106 netif_carrier_off(bp->dev);
2107 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2108 }
2109}
2110
b5bf9068 2111static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2112{
2113 if (!BP_NOMCP(bp)) {
2114 u8 rc;
a2fbb9ea 2115
19680c48 2116 /* Initialize link parameters structure variables */
2117 /* It is recommended to turn off RX FC for jumbo frames
2118 for better performance */
2119 if (IS_E1HMF(bp))
c0700f90 2120 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2121 else if (bp->dev->mtu > 5000)
c0700f90 2122 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2123 else
c0700f90 2124 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2125
4a37fb66 2126 bnx2x_acquire_phy_lock(bp);
2127
2128 if (load_mode == LOAD_DIAG)
2129 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2130
19680c48 2131 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2132
4a37fb66 2133 bnx2x_release_phy_lock(bp);
a2fbb9ea 2134
2135 bnx2x_calc_fc_adv(bp);
2136
2137 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2138 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2139 bnx2x_link_report(bp);
b5bf9068 2140 }
34f80b04 2141
2142 return rc;
2143 }
f5372251 2144 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2145 return -EINVAL;
2146}
2147
c18487ee 2148static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2149{
19680c48 2150 if (!BP_NOMCP(bp)) {
4a37fb66 2151 bnx2x_acquire_phy_lock(bp);
19680c48 2152 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2153 bnx2x_release_phy_lock(bp);
a2fbb9ea 2154
2155 bnx2x_calc_fc_adv(bp);
2156 } else
f5372251 2157 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2158}
a2fbb9ea 2159
2160static void bnx2x__link_reset(struct bnx2x *bp)
2161{
19680c48 2162 if (!BP_NOMCP(bp)) {
4a37fb66 2163 bnx2x_acquire_phy_lock(bp);
589abe3a 2164 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2165 bnx2x_release_phy_lock(bp);
19680c48 2166 } else
f5372251 2167 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2168}
a2fbb9ea 2169
2170static u8 bnx2x_link_test(struct bnx2x *bp)
2171{
2172 u8 rc;
a2fbb9ea 2173
4a37fb66 2174 bnx2x_acquire_phy_lock(bp);
c18487ee 2175 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2176 bnx2x_release_phy_lock(bp);
a2fbb9ea 2177
2178 return rc;
2179}
a2fbb9ea 2180
8a1c38d1 2181static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2182{
2183 u32 r_param = bp->link_vars.line_speed / 8;
2184 u32 fair_periodic_timeout_usec;
2185 u32 t_fair;
34f80b04 2186
2187 memset(&(bp->cmng.rs_vars), 0,
2188 sizeof(struct rate_shaping_vars_per_port));
2189 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2190
2191 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2192 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2193
2194 /* this is the threshold below which no timer arming will occur
2195 1.25 coefficient is for the threshold to be a little bigger
 2196 than the real time, to compensate for timer inaccuracy */
2197 bp->cmng.rs_vars.rs_threshold =
2198 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2199
2200 /* resolution of fairness timer */
2201 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2202 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2203 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2204
2205 /* this is the threshold below which we won't arm the timer anymore */
2206 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2207
2208 /* we multiply by 1e3/8 to get bytes/msec.
 2209 We don't want the credits to exceed
 2210 t_fair*FAIR_MEM (the algorithm resolution) */
2211 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2212 /* since each tick is 4 usec */
2213 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2214}
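/*
 * A worked example of the arithmetic above for a 10000 Mbps link. The two
 * constants are assumptions made only to show the units flowing through the
 * calculation; the driver's real QM_ARB_BYTES and T_FAIR_COEF live in its
 * headers.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t line_speed = 10000;		/* Mbps */
	uint32_t r_param = line_speed / 8;	/* 1250 bytes per usec */
	uint32_t qm_arb_bytes = 40000;		/* assumed */
	uint32_t t_fair_coef = 10000000;	/* assumed; gives 1000us at 10G */

	uint32_t fair_timeout_usec = qm_arb_bytes / r_param;	/* 32 usec */
	uint32_t t_fair = t_fair_coef / line_speed;		/* 1000 usec */

	printf("fairness timer resolution: %u usec (%u SDM ticks of 4 usec)\n",
	       fair_timeout_usec, fair_timeout_usec / 4);
	printf("t_fair: %u usec\n", t_fair);
	return 0;
}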
2215
8a1c38d1 2216static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2217{
2218 struct rate_shaping_vars_per_vn m_rs_vn;
2219 struct fairness_vars_per_vn m_fair_vn;
2220 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2221 u16 vn_min_rate, vn_max_rate;
2222 int i;
2223
2224 /* If function is hidden - set min and max to zeroes */
2225 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2226 vn_min_rate = 0;
2227 vn_max_rate = 0;
2228
2229 } else {
2230 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2231 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2232 /* If fairness is enabled (not all min rates are zeroes) and
34f80b04 2233 if current min rate is zero - set it to 1.
33471629 2234 This is a requirement of the algorithm. */
8a1c38d1 2235 if (bp->vn_weight_sum && (vn_min_rate == 0))
2236 vn_min_rate = DEF_MIN_RATE;
2237 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2238 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2239 }
2240
2241 DP(NETIF_MSG_IFUP,
2242 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2243 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2244
2245 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2246 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2247
2248 /* global vn counter - maximal Mbps for this vn */
2249 m_rs_vn.vn_counter.rate = vn_max_rate;
2250
2251 /* quota - number of bytes transmitted in this period */
2252 m_rs_vn.vn_counter.quota =
2253 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2254
8a1c38d1 2255 if (bp->vn_weight_sum) {
2256 /* credit for each period of the fairness algorithm:
 2257 number of bytes in T_FAIR (the VNs share the port rate).
2258 vn_weight_sum should not be larger than 10000, thus
2259 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2260 than zero */
34f80b04 2261 m_fair_vn.vn_credit_delta =
2262 max((u32)(vn_min_rate * (T_FAIR_COEF /
2263 (8 * bp->vn_weight_sum))),
2264 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2265 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2266 m_fair_vn.vn_credit_delta);
2267 }
2268
2269 /* Store it to internal memory */
2270 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2271 REG_WR(bp, BAR_XSTRORM_INTMEM +
2272 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2273 ((u32 *)(&m_rs_vn))[i]);
2274
2275 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2276 REG_WR(bp, BAR_XSTRORM_INTMEM +
2277 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2278 ((u32 *)(&m_fair_vn))[i]);
2279}
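/*
 * A standalone sketch of the bandwidth extraction above: the per-function
 * configuration word carries min and max bandwidth as small fields which the
 * code scales by 100, and a zero minimum is bumped to DEF_MIN_RATE whenever
 * fairness is active (the algorithm requires a nonzero minimum). Mask and
 * shift values below are illustrative stand-ins for the FUNC_MF_CFG_*
 * constants.
 */
#include <stdint.h>

#define EX_MIN_BW_MASK   0x00ff0000u	/* illustrative */
#define EX_MIN_BW_SHIFT  16
#define EX_MAX_BW_MASK   0xff000000u
#define EX_MAX_BW_SHIFT  24
#define EX_DEF_MIN_RATE  100

static void ex_vn_rates(uint32_t vn_cfg, int fairness_on,
			uint16_t *min_rate, uint16_t *max_rate)
{
	*min_rate = ((vn_cfg & EX_MIN_BW_MASK) >> EX_MIN_BW_SHIFT) * 100;
	*max_rate = ((vn_cfg & EX_MAX_BW_MASK) >> EX_MAX_BW_SHIFT) * 100;

	if (fairness_on && *min_rate == 0)
		*min_rate = EX_DEF_MIN_RATE;	/* fairness needs min > 0 */
}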
2280
8a1c38d1 2281
2282/* This function is called upon link interrupt */
2283static void bnx2x_link_attn(struct bnx2x *bp)
2284{
2285 /* Make sure that we are synced with the current statistics */
2286 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2287
c18487ee 2288 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2289
2290 if (bp->link_vars.link_up) {
2291
2292 /* dropless flow control */
2293 if (CHIP_IS_E1H(bp)) {
2294 int port = BP_PORT(bp);
2295 u32 pause_enabled = 0;
2296
2297 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2298 pause_enabled = 1;
2299
2300 REG_WR(bp, BAR_USTRORM_INTMEM +
2301 USTORM_PAUSE_ENABLED_OFFSET(port),
2302 pause_enabled);
2303 }
2304
2305 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2306 struct host_port_stats *pstats;
2307
2308 pstats = bnx2x_sp(bp, port_stats);
2309 /* reset old bmac stats */
2310 memset(&(pstats->mac_stx[0]), 0,
2311 sizeof(struct mac_stx));
2312 }
2313 if ((bp->state == BNX2X_STATE_OPEN) ||
2314 (bp->state == BNX2X_STATE_DISABLED))
2315 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2316 }
2317
2318 /* indicate link status */
2319 bnx2x_link_report(bp);
2320
2321 if (IS_E1HMF(bp)) {
8a1c38d1 2322 int port = BP_PORT(bp);
34f80b04 2323 int func;
8a1c38d1 2324 int vn;
2325
2326 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2327 if (vn == BP_E1HVN(bp))
2328 continue;
2329
8a1c38d1 2330 func = ((vn << 1) | port);
2331
2332 /* Set the attention towards other drivers
2333 on the same port */
2334 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2335 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2336 }
34f80b04 2337
2338 if (bp->link_vars.link_up) {
2339 int i;
2340
2341 /* Init rate shaping and fairness contexts */
2342 bnx2x_init_port_minmax(bp);
34f80b04 2343
34f80b04 2344 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2345 bnx2x_init_vn_minmax(bp, 2*vn + port);
2346
2347 /* Store it to internal memory */
2348 for (i = 0;
2349 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2350 REG_WR(bp, BAR_XSTRORM_INTMEM +
2351 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2352 ((u32 *)(&bp->cmng))[i]);
2353 }
34f80b04 2354 }
c18487ee 2355}
a2fbb9ea 2356
2357static void bnx2x__link_status_update(struct bnx2x *bp)
2358{
2359 if (bp->state != BNX2X_STATE_OPEN)
2360 return;
a2fbb9ea 2361
c18487ee 2362 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2363
2364 if (bp->link_vars.link_up)
2365 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2366 else
2367 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2368
2369 /* indicate link status */
2370 bnx2x_link_report(bp);
a2fbb9ea 2371}
a2fbb9ea 2372
2373static void bnx2x_pmf_update(struct bnx2x *bp)
2374{
2375 int port = BP_PORT(bp);
2376 u32 val;
2377
2378 bp->port.pmf = 1;
2379 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2380
2381 /* enable nig attention */
2382 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2383 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2384 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2385
2386 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2387}
2388
c18487ee 2389/* end of Link */
2390
2391/* slow path */
2392
2393/*
2394 * General service functions
2395 */
2396
2397/* the slow path queue is odd since completions arrive on the fastpath ring */
2398static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2399 u32 data_hi, u32 data_lo, int common)
2400{
34f80b04 2401 int func = BP_FUNC(bp);
a2fbb9ea 2402
2403 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2404 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2405 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2406 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2407 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2408
2409#ifdef BNX2X_STOP_ON_ERROR
2410 if (unlikely(bp->panic))
2411 return -EIO;
2412#endif
2413
34f80b04 2414 spin_lock_bh(&bp->spq_lock);
2415
2416 if (!bp->spq_left) {
2417 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2418 spin_unlock_bh(&bp->spq_lock);
2419 bnx2x_panic();
2420 return -EBUSY;
2421 }
f1410647 2422
 2423 /* CID needs port number to be encoded in it */
2424 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2425 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2426 HW_CID(bp, cid)));
2427 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2428 if (common)
2429 bp->spq_prod_bd->hdr.type |=
2430 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2431
2432 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2433 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2434
2435 bp->spq_left--;
2436
2437 if (bp->spq_prod_bd == bp->spq_last_bd) {
2438 bp->spq_prod_bd = bp->spq;
2439 bp->spq_prod_idx = 0;
2440 DP(NETIF_MSG_TIMER, "end of spq\n");
2441
2442 } else {
2443 bp->spq_prod_bd++;
2444 bp->spq_prod_idx++;
2445 }
2446
2447 /* Make sure that BD data is updated before writing the producer */
2448 wmb();
2449
34f80b04 2450 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2451 bp->spq_prod_idx);
2452
2453 mmiowb();
2454
34f80b04 2455 spin_unlock_bh(&bp->spq_lock);
2456 return 0;
2457}
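/*
 * A standalone sketch of the producer bookkeeping in bnx2x_sp_post() above:
 * posting consumes one free slot, then either wraps the producer pointer and
 * index at the last BD or advances both. The ring is a plain array here; in
 * the driver the entries are DMA-visible BDs and the new producer index is
 * written to the chip after a write barrier.
 */
#define EX_SPQ_SIZE 8	/* illustrative ring size */

struct ex_spq {
	int bd[EX_SPQ_SIZE];
	int *prod_bd;		/* next slot to fill */
	int *last_bd;		/* &bd[EX_SPQ_SIZE - 1] */
	unsigned int prod_idx;
	unsigned int left;	/* free slots */
};

static int ex_spq_post(struct ex_spq *q, int entry)
{
	if (!q->left)
		return -1;	/* ring full; the driver treats this as a bug */

	*q->prod_bd = entry;
	q->left--;

	if (q->prod_bd == q->last_bd) {
		q->prod_bd = q->bd;	/* wrap to the start */
		q->prod_idx = 0;
	} else {
		q->prod_bd++;
		q->prod_idx++;
	}
	return 0;
}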
2458
2459/* acquire split MCP access lock register */
4a37fb66 2460static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2461{
a2fbb9ea 2462 u32 i, j, val;
34f80b04 2463 int rc = 0;
2464
2465 might_sleep();
2466 i = 100;
2467 for (j = 0; j < i*10; j++) {
2468 val = (1UL << 31);
2469 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2470 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2471 if (val & (1L << 31))
2472 break;
2473
2474 msleep(5);
2475 }
a2fbb9ea 2476 if (!(val & (1L << 31))) {
19680c48 2477 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2478 rc = -EBUSY;
2479 }
2480
2481 return rc;
2482}
2483
2484/* release split MCP access lock register */
2485static void bnx2x_release_alr(struct bnx2x *bp)
2486{
2487 u32 val = 0;
2488
2489 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2490}
2491
2492static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2493{
2494 struct host_def_status_block *def_sb = bp->def_status_blk;
2495 u16 rc = 0;
2496
2497 barrier(); /* status block is written to by the chip */
2498 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2499 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2500 rc |= 1;
2501 }
2502 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2503 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2504 rc |= 2;
2505 }
2506 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2507 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2508 rc |= 4;
2509 }
2510 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2511 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2512 rc |= 8;
2513 }
2514 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2515 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2516 rc |= 16;
2517 }
2518 return rc;
2519}
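/*
 * A standalone sketch of the update mask built above: each section of the
 * default status block contributes one bit (attention = 1, cstorm = 2,
 * ustorm = 4, xstorm = 8, tstorm = 16), which is why bnx2x_sp_task() can
 * test "status & 0x1" for attentions.
 */
#include <stdint.h>

static uint16_t ex_update_mask(uint16_t cached[5], const uint16_t hw[5])
{
	uint16_t rc = 0;
	int i;

	for (i = 0; i < 5; i++) {
		if (cached[i] != hw[i]) {
			cached[i] = hw[i];	/* refresh the local copy */
			rc |= 1u << i;		/* mark section i as updated */
		}
	}
	return rc;
}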
2520
2521/*
2522 * slow path service functions
2523 */
2524
2525static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2526{
34f80b04 2527 int port = BP_PORT(bp);
2528 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2529 COMMAND_REG_ATTN_BITS_SET);
2530 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2531 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2532 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2533 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2534 u32 aeu_mask;
87942b46 2535 u32 nig_mask = 0;
a2fbb9ea 2536
2537 if (bp->attn_state & asserted)
2538 BNX2X_ERR("IGU ERROR\n");
2539
2540 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2541 aeu_mask = REG_RD(bp, aeu_addr);
2542
a2fbb9ea 2543 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2544 aeu_mask, asserted);
2545 aeu_mask &= ~(asserted & 0xff);
2546 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2547
2548 REG_WR(bp, aeu_addr, aeu_mask);
2549 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2550
3fcaf2e5 2551 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2552 bp->attn_state |= asserted;
3fcaf2e5 2553 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2554
2555 if (asserted & ATTN_HARD_WIRED_MASK) {
2556 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2557
2558 bnx2x_acquire_phy_lock(bp);
2559
877e9aa4 2560 /* save nig interrupt mask */
87942b46 2561 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2562 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2563
c18487ee 2564 bnx2x_link_attn(bp);
2565
2566 /* handle unicore attn? */
2567 }
2568 if (asserted & ATTN_SW_TIMER_4_FUNC)
2569 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2570
2571 if (asserted & GPIO_2_FUNC)
2572 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2573
2574 if (asserted & GPIO_3_FUNC)
2575 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2576
2577 if (asserted & GPIO_4_FUNC)
2578 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2579
2580 if (port == 0) {
2581 if (asserted & ATTN_GENERAL_ATTN_1) {
2582 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2583 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2584 }
2585 if (asserted & ATTN_GENERAL_ATTN_2) {
2586 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2587 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2588 }
2589 if (asserted & ATTN_GENERAL_ATTN_3) {
2590 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2591 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2592 }
2593 } else {
2594 if (asserted & ATTN_GENERAL_ATTN_4) {
2595 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2596 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2597 }
2598 if (asserted & ATTN_GENERAL_ATTN_5) {
2599 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2600 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2601 }
2602 if (asserted & ATTN_GENERAL_ATTN_6) {
2603 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2604 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2605 }
2606 }
2607
2608 } /* if hardwired */
2609
2610 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2611 asserted, hc_addr);
2612 REG_WR(bp, hc_addr, asserted);
2613
2614 /* now set back the mask */
a5e9a7cf 2615 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2616 REG_WR(bp, nig_int_mask_addr, nig_mask);
2617 bnx2x_release_phy_lock(bp);
2618 }
2619}
2620
2621static inline void bnx2x_fan_failure(struct bnx2x *bp)
2622{
2623 int port = BP_PORT(bp);
2624
2625 /* mark the failure */
2626 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2627 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2628 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2629 bp->link_params.ext_phy_config);
2630
2631 /* log the failure */
2632 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2633 " the driver to shutdown the card to prevent permanent"
2634 " damage. Please contact Dell Support for assistance\n",
2635 bp->dev->name);
2636}
877e9aa4 2637static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2638{
34f80b04 2639 int port = BP_PORT(bp);
877e9aa4 2640 int reg_offset;
4d295db0 2641 u32 val, swap_val, swap_override;
877e9aa4 2642
2643 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2644 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2645
34f80b04 2646 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2647
2648 val = REG_RD(bp, reg_offset);
2649 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2650 REG_WR(bp, reg_offset, val);
2651
2652 BNX2X_ERR("SPIO5 hw attention\n");
2653
fd4ef40d 2654 /* Fan failure attention */
2655 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2656 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2657 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2658 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2659 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2660 /* The PHY reset is controlled by GPIO 1 */
2661 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2662 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2663 break;
2664
2665 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2666 /* The PHY reset is controlled by GPIO 1 */
2667 /* fake the port number to cancel the swap done in
2668 set_gpio() */
2669 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2670 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2671 port = (swap_val && swap_override) ^ 1;
2672 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2673 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2674 break;
2675
2676 default:
2677 break;
2678 }
fd4ef40d 2679 bnx2x_fan_failure(bp);
877e9aa4 2680 }
34f80b04 2681
2682 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2683 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2684 bnx2x_acquire_phy_lock(bp);
2685 bnx2x_handle_module_detect_int(&bp->link_params);
2686 bnx2x_release_phy_lock(bp);
2687 }
2688
2689 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2690
2691 val = REG_RD(bp, reg_offset);
2692 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2693 REG_WR(bp, reg_offset, val);
2694
2695 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2696 (attn & HW_INTERRUT_ASSERT_SET_0));
2697 bnx2x_panic();
2698 }
2699}
2700
2701static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2702{
2703 u32 val;
2704
0626b899 2705 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2706
2707 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2708 BNX2X_ERR("DB hw attention 0x%x\n", val);
2709 /* DORQ discard attention */
2710 if (val & 0x2)
2711 BNX2X_ERR("FATAL error from DORQ\n");
2712 }
2713
2714 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2715
2716 int port = BP_PORT(bp);
2717 int reg_offset;
2718
2719 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2720 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2721
2722 val = REG_RD(bp, reg_offset);
2723 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2724 REG_WR(bp, reg_offset, val);
2725
2726 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2727 (attn & HW_INTERRUT_ASSERT_SET_1));
2728 bnx2x_panic();
2729 }
2730}
2731
2732static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2733{
2734 u32 val;
2735
2736 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2737
2738 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2739 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2740 /* CFC error attention */
2741 if (val & 0x2)
2742 BNX2X_ERR("FATAL error from CFC\n");
2743 }
2744
2745 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2746
2747 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2748 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2749 /* RQ_USDMDP_FIFO_OVERFLOW */
2750 if (val & 0x18000)
2751 BNX2X_ERR("FATAL error from PXP\n");
2752 }
2753
2754 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2755
2756 int port = BP_PORT(bp);
2757 int reg_offset;
2758
2759 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2760 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2761
2762 val = REG_RD(bp, reg_offset);
2763 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2764 REG_WR(bp, reg_offset, val);
2765
2766 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2767 (attn & HW_INTERRUT_ASSERT_SET_2));
2768 bnx2x_panic();
2769 }
2770}
2771
2772static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2773{
2774 u32 val;
2775
2776 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2777
2778 if (attn & BNX2X_PMF_LINK_ASSERT) {
2779 int func = BP_FUNC(bp);
2780
2781 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2782 bnx2x__link_status_update(bp);
2783 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2784 DRV_STATUS_PMF)
2785 bnx2x_pmf_update(bp);
2786
2787 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2788
2789 BNX2X_ERR("MC assert!\n");
2790 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2791 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2792 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2793 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2794 bnx2x_panic();
2795
2796 } else if (attn & BNX2X_MCP_ASSERT) {
2797
2798 BNX2X_ERR("MCP assert!\n");
2799 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2800 bnx2x_fw_dump(bp);
2801
2802 } else
2803 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2804 }
2805
2806 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2807 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2808 if (attn & BNX2X_GRC_TIMEOUT) {
2809 val = CHIP_IS_E1H(bp) ?
2810 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2811 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2812 }
2813 if (attn & BNX2X_GRC_RSV) {
2814 val = CHIP_IS_E1H(bp) ?
2815 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2816 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2817 }
877e9aa4 2818 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2819 }
2820}
2821
2822static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2823{
2824 struct attn_route attn;
2825 struct attn_route group_mask;
34f80b04 2826 int port = BP_PORT(bp);
877e9aa4 2827 int index;
2828 u32 reg_addr;
2829 u32 val;
3fcaf2e5 2830 u32 aeu_mask;
2831
2832 /* need to take HW lock because MCP or other port might also
2833 try to handle this event */
4a37fb66 2834 bnx2x_acquire_alr(bp);
2835
2836 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2837 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2838 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2839 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2840 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2841 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2842
2843 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2844 if (deasserted & (1 << index)) {
2845 group_mask = bp->attn_group[index];
2846
2847 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2848 index, group_mask.sig[0], group_mask.sig[1],
2849 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2850
2851 bnx2x_attn_int_deasserted3(bp,
2852 attn.sig[3] & group_mask.sig[3]);
2853 bnx2x_attn_int_deasserted1(bp,
2854 attn.sig[1] & group_mask.sig[1]);
2855 bnx2x_attn_int_deasserted2(bp,
2856 attn.sig[2] & group_mask.sig[2]);
2857 bnx2x_attn_int_deasserted0(bp,
2858 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2859
2860 if ((attn.sig[0] & group_mask.sig[0] &
2861 HW_PRTY_ASSERT_SET_0) ||
2862 (attn.sig[1] & group_mask.sig[1] &
2863 HW_PRTY_ASSERT_SET_1) ||
2864 (attn.sig[2] & group_mask.sig[2] &
2865 HW_PRTY_ASSERT_SET_2))
6378c025 2866 BNX2X_ERR("FATAL HW block parity attention\n");
2867 }
2868 }
2869
4a37fb66 2870 bnx2x_release_alr(bp);
a2fbb9ea 2871
5c862848 2872 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2873
2874 val = ~deasserted;
2875 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2876 val, reg_addr);
5c862848 2877 REG_WR(bp, reg_addr, val);
a2fbb9ea 2878
a2fbb9ea 2879 if (~bp->attn_state & deasserted)
3fcaf2e5 2880 BNX2X_ERR("IGU ERROR\n");
2881
2882 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2883 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2884
2885 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2886 aeu_mask = REG_RD(bp, reg_addr);
2887
2888 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2889 aeu_mask, deasserted);
2890 aeu_mask |= (deasserted & 0xff);
2891 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2892
2893 REG_WR(bp, reg_addr, aeu_mask);
2894 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2895
2896 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2897 bp->attn_state &= ~deasserted;
2898 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2899}
2900
2901static void bnx2x_attn_int(struct bnx2x *bp)
2902{
2903 /* read local copy of bits */
2904 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2905 attn_bits);
2906 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2907 attn_bits_ack);
2908 u32 attn_state = bp->attn_state;
2909
2910 /* look for changed bits */
2911 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2912 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2913
2914 DP(NETIF_MSG_HW,
2915 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2916 attn_bits, attn_ack, asserted, deasserted);
2917
2918 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2919 BNX2X_ERR("BAD attention state\n");
2920
2921 /* handle bits that were raised */
2922 if (asserted)
2923 bnx2x_attn_int_asserted(bp, asserted);
2924
2925 if (deasserted)
2926 bnx2x_attn_int_deasserted(bp, deasserted);
2927}
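/*
 * A worked example of the edge detection above. With attn_bits = 0b0101,
 * attn_ack = 0b0011 and attn_state = 0b0011, bit 2 is newly raised and bit 1
 * has just dropped:
 *   asserted   = bits & ~ack & ~state = 0b0100
 *   deasserted = ~bits & ack & state  = 0b0010
 */
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t attn_bits = 0x5, attn_ack = 0x3, attn_state = 0x3;

	uint32_t asserted = attn_bits & ~attn_ack & ~attn_state;
	uint32_t deasserted = ~attn_bits & attn_ack & attn_state;

	assert(asserted == 0x4);	/* raised but not yet acked/tracked */
	assert(deasserted == 0x2);	/* acked and tracked, now lowered */
	return 0;
}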
2928
2929static void bnx2x_sp_task(struct work_struct *work)
2930{
1cf167f2 2931 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2932 u16 status;
2933
34f80b04 2934
2935 /* Return here if interrupt is disabled */
2936 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2937 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2938 return;
2939 }
2940
2941 status = bnx2x_update_dsb_idx(bp);
2942/* if (status == 0) */
2943/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2944
3196a88a 2945 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2946
2947 /* HW attentions */
2948 if (status & 0x1)
a2fbb9ea 2949 bnx2x_attn_int(bp);
a2fbb9ea 2950
68d59484 2951 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2952 IGU_INT_NOP, 1);
2953 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2954 IGU_INT_NOP, 1);
2955 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2956 IGU_INT_NOP, 1);
2957 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2958 IGU_INT_NOP, 1);
2959 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2960 IGU_INT_ENABLE, 1);
877e9aa4 2961
2962}
2963
2964static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2965{
2966 struct net_device *dev = dev_instance;
2967 struct bnx2x *bp = netdev_priv(dev);
2968
2969 /* Return here if interrupt is disabled */
2970 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2971 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2972 return IRQ_HANDLED;
2973 }
2974
8d9c5f34 2975 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2976
2977#ifdef BNX2X_STOP_ON_ERROR
2978 if (unlikely(bp->panic))
2979 return IRQ_HANDLED;
2980#endif
2981
1cf167f2 2982 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2983
2984 return IRQ_HANDLED;
2985}
2986
2987/* end of slow path */
2988
2989/* Statistics */
2990
2991/****************************************************************************
2992* Macros
2993****************************************************************************/
2994
2995/* sum[hi:lo] += add[hi:lo] */
2996#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2997 do { \
2998 s_lo += a_lo; \
f5ba6772 2999 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3000 } while (0)
3001
3002/* difference = minuend - subtrahend */
3003#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3004 do { \
3005 if (m_lo < s_lo) { \
3006 /* underflow */ \
a2fbb9ea 3007 d_hi = m_hi - s_hi; \
bb2a0f7a 3008 if (d_hi > 0) { \
6378c025 3009 /* we can 'loan' 1 */ \
3010 d_hi--; \
3011 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3012 } else { \
6378c025 3013 /* m_hi <= s_hi */ \
3014 d_hi = 0; \
3015 d_lo = 0; \
3016 } \
3017 } else { \
3018 /* m_lo >= s_lo */ \
a2fbb9ea 3019 if (m_hi < s_hi) { \
3020 d_hi = 0; \
3021 d_lo = 0; \
3022 } else { \
6378c025 3023 /* m_hi >= s_hi */ \
3024 d_hi = m_hi - s_hi; \
3025 d_lo = m_lo - s_lo; \
3026 } \
3027 } \
3028 } while (0)
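/*
 * A standalone check of the carry/borrow handling encoded in ADD_64 and
 * DIFF_64 above, written as plain functions over u32 pairs. As the macro's
 * comments describe, the difference clamps to zero when the minuend is the
 * smaller value.
 */
#include <stdint.h>
#include <limits.h>
#include <assert.h>

static void ex_add64(uint32_t *s_hi, uint32_t a_hi, uint32_t *s_lo, uint32_t a_lo)
{
	*s_lo += a_lo;
	*s_hi += a_hi + (*s_lo < a_lo ? 1 : 0);	/* carry out of the low word */
}

static void ex_diff64(uint32_t *d_hi, uint32_t m_hi, uint32_t s_hi,
		      uint32_t *d_lo, uint32_t m_lo, uint32_t s_lo)
{
	if (m_lo < s_lo && m_hi > s_hi) {		/* borrow 1 from d_hi */
		*d_hi = m_hi - s_hi - 1;
		*d_lo = m_lo + (UINT_MAX - s_lo) + 1;
	} else if (m_lo >= s_lo && m_hi >= s_hi) {	/* plain subtraction */
		*d_hi = m_hi - s_hi;
		*d_lo = m_lo - s_lo;
	} else {					/* minuend smaller: clamp */
		*d_hi = 0;
		*d_lo = 0;
	}
}

int main(void)
{
	uint32_t hi = 0, lo = 0xffffffffu;

	ex_add64(&hi, 0, &lo, 1);		/* 0x00000000ffffffff + 1 */
	assert(hi == 1 && lo == 0);		/* carry propagated */

	ex_diff64(&hi, 1, 0, &lo, 0, 1);	/* 0x100000000 - 1 */
	assert(hi == 0 && lo == 0xffffffffu);	/* borrow taken */
	return 0;
}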
3029
bb2a0f7a 3030#define UPDATE_STAT64(s, t) \
a2fbb9ea 3031 do { \
3032 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3033 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3034 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3035 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3036 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3037 pstats->mac_stx[1].t##_lo, diff.lo); \
3038 } while (0)
3039
bb2a0f7a 3040#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3041 do { \
3042 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3043 diff.lo, new->s##_lo, old->s##_lo); \
3044 ADD_64(estats->t##_hi, diff.hi, \
3045 estats->t##_lo, diff.lo); \
3046 } while (0)
3047
3048/* sum[hi:lo] += add */
3049#define ADD_EXTEND_64(s_hi, s_lo, a) \
3050 do { \
3051 s_lo += a; \
3052 s_hi += (s_lo < a) ? 1 : 0; \
3053 } while (0)
3054
bb2a0f7a 3055#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3056 do { \
3057 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3058 pstats->mac_stx[1].s##_lo, \
3059 new->s); \
3060 } while (0)
3061
bb2a0f7a 3062#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3063 do { \
3064 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3065 old_tclient->s = tclient->s; \
3066 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3067 } while (0)
3068
3069#define UPDATE_EXTEND_USTAT(s, t) \
3070 do { \
3071 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3072 old_uclient->s = uclient->s; \
3073 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3074 } while (0)
3075
3076#define UPDATE_EXTEND_XSTAT(s, t) \
3077 do { \
3078 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3079 old_xclient->s = xclient->s; \
3080 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3081 } while (0)
3082
3083/* minuend -= subtrahend */
3084#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3085 do { \
3086 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3087 } while (0)
3088
3089/* minuend[hi:lo] -= subtrahend */
3090#define SUB_EXTEND_64(m_hi, m_lo, s) \
3091 do { \
3092 SUB_64(m_hi, 0, m_lo, s); \
3093 } while (0)
3094
3095#define SUB_EXTEND_USTAT(s, t) \
3096 do { \
3097 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3098 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3099 } while (0)
3100
3101/*
3102 * General service functions
3103 */
3104
3105static inline long bnx2x_hilo(u32 *hiref)
3106{
3107 u32 lo = *(hiref + 1);
3108#if (BITS_PER_LONG == 64)
3109 u32 hi = *hiref;
3110
3111 return HILO_U64(hi, lo);
3112#else
3113 return lo;
3114#endif
3115}
3116
3117/*
3118 * Init service functions
3119 */
3120
3121static void bnx2x_storm_stats_post(struct bnx2x *bp)
3122{
3123 if (!bp->stats_pending) {
3124 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3125 int i, rc;
3126
3127 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3128 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3129 for_each_queue(bp, i)
3130 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3131
3132 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3133 ((u32 *)&ramrod_data)[1],
3134 ((u32 *)&ramrod_data)[0], 0);
3135 if (rc == 0) {
 3136 /* the stats ramrod has its own slot on the spq */
3137 bp->spq_left++;
3138 bp->stats_pending = 1;
3139 }
3140 }
3141}
3142
3143static void bnx2x_stats_init(struct bnx2x *bp)
3144{
3145 int port = BP_PORT(bp);
de832a55 3146 int i;
bb2a0f7a 3147
de832a55 3148 bp->stats_pending = 0;
3149 bp->executer_idx = 0;
3150 bp->stats_counter = 0;
3151
3152 /* port stats */
3153 if (!BP_NOMCP(bp))
3154 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3155 else
3156 bp->port.port_stx = 0;
3157 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3158
3159 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3160 bp->port.old_nig_stats.brb_discard =
3161 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3162 bp->port.old_nig_stats.brb_truncate =
3163 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3164 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3165 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3166 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3167 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3168
3169 /* function stats */
3170 for_each_queue(bp, i) {
3171 struct bnx2x_fastpath *fp = &bp->fp[i];
3172
3173 memset(&fp->old_tclient, 0,
3174 sizeof(struct tstorm_per_client_stats));
3175 memset(&fp->old_uclient, 0,
3176 sizeof(struct ustorm_per_client_stats));
3177 memset(&fp->old_xclient, 0,
3178 sizeof(struct xstorm_per_client_stats));
3179 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3180 }
3181
bb2a0f7a 3182 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3183 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3184
3185 bp->stats_state = STATS_STATE_DISABLED;
3186 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3187 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3188}
3189
3190static void bnx2x_hw_stats_post(struct bnx2x *bp)
3191{
3192 struct dmae_command *dmae = &bp->stats_dmae;
3193 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3194
3195 *stats_comp = DMAE_COMP_VAL;
3196 if (CHIP_REV_IS_SLOW(bp))
3197 return;
3198
3199 /* loader */
3200 if (bp->executer_idx) {
3201 int loader_idx = PMF_DMAE_C(bp);
3202
3203 memset(dmae, 0, sizeof(struct dmae_command));
3204
3205 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3206 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3207 DMAE_CMD_DST_RESET |
3208#ifdef __BIG_ENDIAN
3209 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3210#else
3211 DMAE_CMD_ENDIANITY_DW_SWAP |
3212#endif
3213 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3214 DMAE_CMD_PORT_0) |
3215 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3216 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3217 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3218 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3219 sizeof(struct dmae_command) *
3220 (loader_idx + 1)) >> 2;
3221 dmae->dst_addr_hi = 0;
3222 dmae->len = sizeof(struct dmae_command) >> 2;
3223 if (CHIP_IS_E1(bp))
3224 dmae->len--;
3225 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3226 dmae->comp_addr_hi = 0;
3227 dmae->comp_val = 1;
3228
3229 *stats_comp = 0;
3230 bnx2x_post_dmae(bp, dmae, loader_idx);
3231
3232 } else if (bp->func_stx) {
3233 *stats_comp = 0;
3234 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3235 }
3236}
3237
3238static int bnx2x_stats_comp(struct bnx2x *bp)
3239{
3240 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3241 int cnt = 10;
3242
3243 might_sleep();
3244 while (*stats_comp != DMAE_COMP_VAL) {
3245 if (!cnt) {
3246 BNX2X_ERR("timeout waiting for stats finished\n");
3247 break;
3248 }
3249 cnt--;
12469401 3250 msleep(1);
3251 }
3252 return 1;
3253}
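/*
 * A standalone sketch of the completion polling above: spin on a completion
 * word the DMA engine will overwrite, bounded so a wedged engine cannot hang
 * the caller. The sentinel value is arbitrary here; the driver compares
 * against DMAE_COMP_VAL and sleeps 1 ms per iteration.
 */
#include <stdint.h>

#define EX_COMP_VAL 0xd0d0d0d0u	/* illustrative sentinel */

static int ex_wait_comp(volatile uint32_t *comp, int max_iters)
{
	while (*comp != EX_COMP_VAL) {
		if (!max_iters--)
			return -1;	/* timed out; the driver logs and goes on */
		/* driver: msleep(1) here */
	}
	return 0;
}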
3254
3255/*
3256 * Statistics service functions
3257 */
3258
3259static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3260{
3261 struct dmae_command *dmae;
3262 u32 opcode;
3263 int loader_idx = PMF_DMAE_C(bp);
3264 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3265
3266 /* sanity */
3267 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3268 BNX2X_ERR("BUG!\n");
3269 return;
3270 }
3271
3272 bp->executer_idx = 0;
3273
3274 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3275 DMAE_CMD_C_ENABLE |
3276 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3277#ifdef __BIG_ENDIAN
3278 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3279#else
3280 DMAE_CMD_ENDIANITY_DW_SWAP |
3281#endif
3282 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3283 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3284
3285 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3286 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3287 dmae->src_addr_lo = bp->port.port_stx >> 2;
3288 dmae->src_addr_hi = 0;
3289 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3290 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3291 dmae->len = DMAE_LEN32_RD_MAX;
3292 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3293 dmae->comp_addr_hi = 0;
3294 dmae->comp_val = 1;
3295
3296 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3297 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3298 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3299 dmae->src_addr_hi = 0;
3300 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3301 DMAE_LEN32_RD_MAX * 4);
3302 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3303 DMAE_LEN32_RD_MAX * 4);
3304 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3305 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3306 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3307 dmae->comp_val = DMAE_COMP_VAL;
3308
3309 *stats_comp = 0;
3310 bnx2x_hw_stats_post(bp);
3311 bnx2x_stats_comp(bp);
3312}
3313
3314static void bnx2x_port_stats_init(struct bnx2x *bp)
3315{
3316 struct dmae_command *dmae;
34f80b04 3317 int port = BP_PORT(bp);
bb2a0f7a 3318 int vn = BP_E1HVN(bp);
a2fbb9ea 3319 u32 opcode;
bb2a0f7a 3320 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3321 u32 mac_addr;
3322 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3323
3324 /* sanity */
3325 if (!bp->link_vars.link_up || !bp->port.pmf) {
3326 BNX2X_ERR("BUG!\n");
3327 return;
3328 }
3329
3330 bp->executer_idx = 0;
3331
3332 /* MCP */
3333 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3334 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3335 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3336#ifdef __BIG_ENDIAN
bb2a0f7a 3337 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3338#else
bb2a0f7a 3339 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3340#endif
3341 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3342 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3343
bb2a0f7a 3344 if (bp->port.port_stx) {
3345
3346 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3347 dmae->opcode = opcode;
3348 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3349 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3350 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3351 dmae->dst_addr_hi = 0;
3352 dmae->len = sizeof(struct host_port_stats) >> 2;
3353 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3354 dmae->comp_addr_hi = 0;
3355 dmae->comp_val = 1;
3356 }
3357
3358 if (bp->func_stx) {
3359
3360 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3361 dmae->opcode = opcode;
3362 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3363 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3364 dmae->dst_addr_lo = bp->func_stx >> 2;
3365 dmae->dst_addr_hi = 0;
3366 dmae->len = sizeof(struct host_func_stats) >> 2;
3367 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3368 dmae->comp_addr_hi = 0;
3369 dmae->comp_val = 1;
3370 }
3371
bb2a0f7a 3372 /* MAC */
3373 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3374 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3375 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3376#ifdef __BIG_ENDIAN
3377 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3378#else
3379 DMAE_CMD_ENDIANITY_DW_SWAP |
3380#endif
3381 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3382 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3383
c18487ee 3384 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3385
3386 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3387 NIG_REG_INGRESS_BMAC0_MEM);
3388
3389 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3390 BIGMAC_REGISTER_TX_STAT_GTBYT */
3391 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3392 dmae->opcode = opcode;
3393 dmae->src_addr_lo = (mac_addr +
3394 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3395 dmae->src_addr_hi = 0;
3396 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3397 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3398 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3399 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3400 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3401 dmae->comp_addr_hi = 0;
3402 dmae->comp_val = 1;
3403
3404 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3405 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3406 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3407 dmae->opcode = opcode;
3408 dmae->src_addr_lo = (mac_addr +
3409 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3410 dmae->src_addr_hi = 0;
3411 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3412 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3413 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3414 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3415 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3416 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3417 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3418 dmae->comp_addr_hi = 0;
3419 dmae->comp_val = 1;
3420
c18487ee 3421 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3422
3423 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3424
3425 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3426 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3427 dmae->opcode = opcode;
3428 dmae->src_addr_lo = (mac_addr +
3429 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3430 dmae->src_addr_hi = 0;
3431 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3432 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3433 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3434 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3435 dmae->comp_addr_hi = 0;
3436 dmae->comp_val = 1;
3437
3438 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3439 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3440 dmae->opcode = opcode;
3441 dmae->src_addr_lo = (mac_addr +
3442 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3443 dmae->src_addr_hi = 0;
3444 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3445 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3446 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3447 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3448 dmae->len = 1;
3449 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3450 dmae->comp_addr_hi = 0;
3451 dmae->comp_val = 1;
3452
3453 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3454 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3455 dmae->opcode = opcode;
3456 dmae->src_addr_lo = (mac_addr +
3457 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3458 dmae->src_addr_hi = 0;
3459 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3460 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3461 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3462 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3463 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3464 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3465 dmae->comp_addr_hi = 0;
3466 dmae->comp_val = 1;
3467 }
3468
3469 /* NIG */
bb2a0f7a
YG
3470 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3471 dmae->opcode = opcode;
3472 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3473 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3474 dmae->src_addr_hi = 0;
3475 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3476 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3477 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3478 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3479 dmae->comp_addr_hi = 0;
3480 dmae->comp_val = 1;
3481
3482 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3483 dmae->opcode = opcode;
3484 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3485 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3486 dmae->src_addr_hi = 0;
3487 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3488 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3489 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3490 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3491 dmae->len = (2*sizeof(u32)) >> 2;
3492 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3493 dmae->comp_addr_hi = 0;
3494 dmae->comp_val = 1;
3495
a2fbb9ea
ET
3496 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3497 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3498 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3499 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3500#ifdef __BIG_ENDIAN
3501 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3502#else
3503 DMAE_CMD_ENDIANITY_DW_SWAP |
3504#endif
bb2a0f7a
YG
3505 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3506 (vn << DMAE_CMD_E1HVN_SHIFT));
3507 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3508 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3509 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3510 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3511 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3512 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3513 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3514 dmae->len = (2*sizeof(u32)) >> 2;
3515 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3516 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3517 dmae->comp_val = DMAE_COMP_VAL;
3518
3519 *stats_comp = 0;
a2fbb9ea
ET
3520}
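
A standalone sketch of the DMAE descriptor pattern assembled above. Every command copies between host (PCI) memory and chip (GRC) space; GRC addresses are in 32-bit words, hence the ">> 2" shifts. Intermediate commands complete by poking the next loader GO register, the last one by writing comp_val to a host completion word. The struct and register value here are illustrative, not the real dmae_command layout.

#include <stdint.h>
#include <stdio.h>

struct dmae_sketch {
	uint32_t opcode;
	uint32_t src_addr_lo, src_addr_hi;
	uint32_t dst_addr_lo, dst_addr_hi;
	uint16_t len;			/* length in 32-bit words */
	uint32_t comp_addr_lo, comp_addr_hi;
	uint32_t comp_val;
};

/* Describe a host-to-GRC copy whose completion chains to a (hypothetical)
 * loader GO register, mirroring the port/function stats commands above. */
static void fill_pci_to_grc(struct dmae_sketch *d, uint64_t host_addr,
			    uint32_t grc_off, uint32_t bytes,
			    uint32_t loader_go_reg)
{
	d->src_addr_lo = (uint32_t)host_addr;		/* U64_LO() */
	d->src_addr_hi = (uint32_t)(host_addr >> 32);	/* U64_HI() */
	d->dst_addr_lo = grc_off >> 2;		/* GRC side counts words */
	d->dst_addr_hi = 0;
	d->len = (uint16_t)(bytes >> 2);
	d->comp_addr_lo = loader_go_reg >> 2;
	d->comp_addr_hi = 0;
	d->comp_val = 1;			/* written by HW when done */
}

int main(void)
{
	struct dmae_sketch d = {0};

	fill_pci_to_grc(&d, 0x12345678abcdULL, 0x8000, 64, 0x6010);
	printf("len=%u words, dst word 0x%x\n", d.len, d.dst_addr_lo);
	return 0;
}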
3521
bb2a0f7a 3522static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3523{
bb2a0f7a
YG
3524 struct dmae_command *dmae = &bp->stats_dmae;
3525 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3526
bb2a0f7a
YG
3527 /* sanity */
3528 if (!bp->func_stx) {
3529 BNX2X_ERR("BUG!\n");
3530 return;
3531 }
a2fbb9ea 3532
bb2a0f7a
YG
3533 bp->executer_idx = 0;
3534 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3535
bb2a0f7a
YG
3536 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3537 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3538 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3539#ifdef __BIG_ENDIAN
3540 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3541#else
3542 DMAE_CMD_ENDIANITY_DW_SWAP |
3543#endif
3544 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3545 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3546 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3547 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3548 dmae->dst_addr_lo = bp->func_stx >> 2;
3549 dmae->dst_addr_hi = 0;
3550 dmae->len = sizeof(struct host_func_stats) >> 2;
3551 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3552 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3553 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3554
bb2a0f7a
YG
3555 *stats_comp = 0;
3556}
a2fbb9ea 3557
bb2a0f7a
YG
3558static void bnx2x_stats_start(struct bnx2x *bp)
3559{
3560 if (bp->port.pmf)
3561 bnx2x_port_stats_init(bp);
3562
3563 else if (bp->func_stx)
3564 bnx2x_func_stats_init(bp);
3565
3566 bnx2x_hw_stats_post(bp);
3567 bnx2x_storm_stats_post(bp);
3568}
3569
3570static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3571{
3572 bnx2x_stats_comp(bp);
3573 bnx2x_stats_pmf_update(bp);
3574 bnx2x_stats_start(bp);
3575}
3576
3577static void bnx2x_stats_restart(struct bnx2x *bp)
3578{
3579 bnx2x_stats_comp(bp);
3580 bnx2x_stats_start(bp);
3581}
3582
3583static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3584{
3585 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3586 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3587 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3588 struct {
3589 u32 lo;
3590 u32 hi;
3591 } diff;
bb2a0f7a
YG
3592
3593 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3594 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3595 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3596 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3597 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3598 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3599 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3600 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3601 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
bb2a0f7a
YG
3602 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3603 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3604 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3605 UPDATE_STAT64(tx_stat_gt127,
3606 tx_stat_etherstatspkts65octetsto127octets);
3607 UPDATE_STAT64(tx_stat_gt255,
3608 tx_stat_etherstatspkts128octetsto255octets);
3609 UPDATE_STAT64(tx_stat_gt511,
3610 tx_stat_etherstatspkts256octetsto511octets);
3611 UPDATE_STAT64(tx_stat_gt1023,
3612 tx_stat_etherstatspkts512octetsto1023octets);
3613 UPDATE_STAT64(tx_stat_gt1518,
3614 tx_stat_etherstatspkts1024octetsto1522octets);
3615 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3616 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3617 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3618 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3619 UPDATE_STAT64(tx_stat_gterr,
3620 tx_stat_dot3statsinternalmactransmiterrors);
3621 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
de832a55
EG
3622
3623 estats->pause_frames_received_hi =
3624 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3625 estats->pause_frames_received_lo =
3626 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3627
3628 estats->pause_frames_sent_hi =
3629 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3630 estats->pause_frames_sent_lo =
3631 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
bb2a0f7a
YG
3632}
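
A sketch of what the UPDATE_STAT64() calls above do: each BMAC counter is exported as a {hi, lo} pair; the driver subtracts the previous snapshot and accumulates the delta into a 64-bit host counter, so hardware wraps are absorbed by modular arithmetic. Names are illustrative; the real macro works on struct fields by token pasting.

#include <stdint.h>
#include <stdio.h>

struct u64_pair { uint32_t hi, lo; };

static uint64_t pair_to_u64(const struct u64_pair *p)
{
	return ((uint64_t)p->hi << 32) | p->lo;
}

static void update_stat64(const struct u64_pair *hw_new,
			  struct u64_pair *hw_old, struct u64_pair *host)
{
	uint64_t acc = pair_to_u64(host) +
		       (pair_to_u64(hw_new) - pair_to_u64(hw_old));

	*hw_old = *hw_new;		/* keep the snapshot for next time */
	host->hi = (uint32_t)(acc >> 32);
	host->lo = (uint32_t)acc;
}

int main(void)
{
	struct u64_pair old = { 0, 0xfffffff0u };
	struct u64_pair new = { 1, 0x00000010u };
	struct u64_pair acc = { 0, 0 };

	update_stat64(&new, &old, &acc);
	printf("delta accumulated: %u (expect 32)\n", acc.lo);
	return 0;
}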
3633
3634static void bnx2x_emac_stats_update(struct bnx2x *bp)
3635{
3636 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3637 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3638 struct bnx2x_eth_stats *estats = &bp->eth_stats;
bb2a0f7a
YG
3639
3640 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3641 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3642 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3643 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3644 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3645 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3646 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3647 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3648 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3649 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3650 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3651 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3652 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3653 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3654 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3655 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3656 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3657 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3658 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3659 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3660 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3661 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3662 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3663 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3664 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3665 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3666 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3667 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3668 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3669 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3670 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
de832a55
EG
3671
3672 estats->pause_frames_received_hi =
3673 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3674 estats->pause_frames_received_lo =
3675 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3676 ADD_64(estats->pause_frames_received_hi,
3677 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3678 estats->pause_frames_received_lo,
3679 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3680
3681 estats->pause_frames_sent_hi =
3682 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3683 estats->pause_frames_sent_lo =
3684 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3685 ADD_64(estats->pause_frames_sent_hi,
3686 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3687 estats->pause_frames_sent_lo,
3688 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
bb2a0f7a
YG
3689}
3690
3691static int bnx2x_hw_stats_update(struct bnx2x *bp)
3692{
3693 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3694 struct nig_stats *old = &(bp->port.old_nig_stats);
3695 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3696 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3697 struct {
3698 u32 lo;
3699 u32 hi;
3700 } diff;
de832a55 3701 u32 nig_timer_max;
bb2a0f7a
YG
3702
3703 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3704 bnx2x_bmac_stats_update(bp);
3705
3706 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3707 bnx2x_emac_stats_update(bp);
3708
3709 else { /* unreached */
c3eefaf6 3710 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
bb2a0f7a
YG
3711 return -1;
3712 }
a2fbb9ea 3713
bb2a0f7a
YG
3714 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3715 new->brb_discard - old->brb_discard);
66e855f3
YG
3716 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3717 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3718
bb2a0f7a
YG
3719 UPDATE_STAT64_NIG(egress_mac_pkt0,
3720 etherstatspkts1024octetsto1522octets);
3721 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3722
bb2a0f7a 3723 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3724
bb2a0f7a
YG
3725 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3726 sizeof(struct mac_stx));
3727 estats->brb_drop_hi = pstats->brb_drop_hi;
3728 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3729
bb2a0f7a 3730 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3731
de832a55
EG
3732 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3733 if (nig_timer_max != estats->nig_timer_max) {
3734 estats->nig_timer_max = nig_timer_max;
3735 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3736 }
3737
bb2a0f7a 3738 return 0;
a2fbb9ea
ET
3739}
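
A sketch of the ADD_EXTEND_64() idea used for the BRB counters above: the NIG exposes only 32-bit counters, so the driver adds each 32-bit delta into a software-extended {hi, lo} pair, carrying manually from lo into hi on unsigned overflow.

#include <stdint.h>
#include <stdio.h>

static void add_extend_64(uint32_t *hi, uint32_t *lo, uint32_t delta)
{
	uint32_t old_lo = *lo;

	*lo += delta;
	if (*lo < old_lo)	/* wrapped => carry into the high word */
		(*hi)++;
}

int main(void)
{
	uint32_t hi = 0, lo = 0xffffff00u;

	add_extend_64(&hi, &lo, 0x200);	/* forces a carry */
	printf("hi=%u lo=0x%x\n", hi, lo);
	return 0;
}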
3740
bb2a0f7a 3741static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3742{
3743 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3744 struct tstorm_per_port_stats *tport =
de832a55 3745 &stats->tstorm_common.port_statistics;
bb2a0f7a
YG
3746 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3747 struct bnx2x_eth_stats *estats = &bp->eth_stats;
de832a55
EG
3748 int i;
3749
3750 memset(&(fstats->total_bytes_received_hi), 0,
3751 sizeof(struct host_func_stats) - 2*sizeof(u32));
3752 estats->error_bytes_received_hi = 0;
3753 estats->error_bytes_received_lo = 0;
3754 estats->etherstatsoverrsizepkts_hi = 0;
3755 estats->etherstatsoverrsizepkts_lo = 0;
3756 estats->no_buff_discard_hi = 0;
3757 estats->no_buff_discard_lo = 0;
a2fbb9ea 3758
de832a55
EG
3759 for_each_queue(bp, i) {
3760 struct bnx2x_fastpath *fp = &bp->fp[i];
3761 int cl_id = fp->cl_id;
3762 struct tstorm_per_client_stats *tclient =
3763 &stats->tstorm_common.client_statistics[cl_id];
3764 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3765 struct ustorm_per_client_stats *uclient =
3766 &stats->ustorm_common.client_statistics[cl_id];
3767 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3768 struct xstorm_per_client_stats *xclient =
3769 &stats->xstorm_common.client_statistics[cl_id];
3770 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3771 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3772 u32 diff;
3773
3774 /* are storm stats valid? */
3775 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 3776 bp->stats_counter) {
de832a55
EG
3777 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3778 " xstorm counter (%d) != stats_counter (%d)\n",
3779 i, xclient->stats_counter, bp->stats_counter);
3780 return -1;
3781 }
3782 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 3783 bp->stats_counter) {
de832a55
EG
3784 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3785 " tstorm counter (%d) != stats_counter (%d)\n",
3786 i, tclient->stats_counter, bp->stats_counter);
3787 return -2;
3788 }
3789 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3790 bp->stats_counter) {
3791 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3792 " ustorm counter (%d) != stats_counter (%d)\n",
3793 i, uclient->stats_counter, bp->stats_counter);
3794 return -4;
3795 }
a2fbb9ea 3796
de832a55
EG
3797 qstats->total_bytes_received_hi =
3798 qstats->valid_bytes_received_hi =
a2fbb9ea 3799 le32_to_cpu(tclient->total_rcv_bytes.hi);
de832a55
EG
3800 qstats->total_bytes_received_lo =
3801 qstats->valid_bytes_received_lo =
a2fbb9ea 3802 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a 3803
de832a55 3804 qstats->error_bytes_received_hi =
bb2a0f7a 3805 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 3806 qstats->error_bytes_received_lo =
bb2a0f7a 3807 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 3808
de832a55
EG
3809 ADD_64(qstats->total_bytes_received_hi,
3810 qstats->error_bytes_received_hi,
3811 qstats->total_bytes_received_lo,
3812 qstats->error_bytes_received_lo);
3813
3814 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3815 total_unicast_packets_received);
3816 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3817 total_multicast_packets_received);
3818 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3819 total_broadcast_packets_received);
3820 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3821 etherstatsoverrsizepkts);
3822 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3823
3824 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3825 total_unicast_packets_received);
3826 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3827 total_multicast_packets_received);
3828 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3829 total_broadcast_packets_received);
3830 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3831 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3832 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3833
3834 qstats->total_bytes_transmitted_hi =
bb2a0f7a 3835 le32_to_cpu(xclient->total_sent_bytes.hi);
de832a55 3836 qstats->total_bytes_transmitted_lo =
bb2a0f7a
YG
3837 le32_to_cpu(xclient->total_sent_bytes.lo);
3838
de832a55
EG
3839 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3840 total_unicast_packets_transmitted);
3841 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3842 total_multicast_packets_transmitted);
3843 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3844 total_broadcast_packets_transmitted);
3845
3846 old_tclient->checksum_discard = tclient->checksum_discard;
3847 old_tclient->ttl0_discard = tclient->ttl0_discard;
3848
3849 ADD_64(fstats->total_bytes_received_hi,
3850 qstats->total_bytes_received_hi,
3851 fstats->total_bytes_received_lo,
3852 qstats->total_bytes_received_lo);
3853 ADD_64(fstats->total_bytes_transmitted_hi,
3854 qstats->total_bytes_transmitted_hi,
3855 fstats->total_bytes_transmitted_lo,
3856 qstats->total_bytes_transmitted_lo);
3857 ADD_64(fstats->total_unicast_packets_received_hi,
3858 qstats->total_unicast_packets_received_hi,
3859 fstats->total_unicast_packets_received_lo,
3860 qstats->total_unicast_packets_received_lo);
3861 ADD_64(fstats->total_multicast_packets_received_hi,
3862 qstats->total_multicast_packets_received_hi,
3863 fstats->total_multicast_packets_received_lo,
3864 qstats->total_multicast_packets_received_lo);
3865 ADD_64(fstats->total_broadcast_packets_received_hi,
3866 qstats->total_broadcast_packets_received_hi,
3867 fstats->total_broadcast_packets_received_lo,
3868 qstats->total_broadcast_packets_received_lo);
3869 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3870 qstats->total_unicast_packets_transmitted_hi,
3871 fstats->total_unicast_packets_transmitted_lo,
3872 qstats->total_unicast_packets_transmitted_lo);
3873 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3874 qstats->total_multicast_packets_transmitted_hi,
3875 fstats->total_multicast_packets_transmitted_lo,
3876 qstats->total_multicast_packets_transmitted_lo);
3877 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3878 qstats->total_broadcast_packets_transmitted_hi,
3879 fstats->total_broadcast_packets_transmitted_lo,
3880 qstats->total_broadcast_packets_transmitted_lo);
3881 ADD_64(fstats->valid_bytes_received_hi,
3882 qstats->valid_bytes_received_hi,
3883 fstats->valid_bytes_received_lo,
3884 qstats->valid_bytes_received_lo);
3885
3886 ADD_64(estats->error_bytes_received_hi,
3887 qstats->error_bytes_received_hi,
3888 estats->error_bytes_received_lo,
3889 qstats->error_bytes_received_lo);
3890 ADD_64(estats->etherstatsoverrsizepkts_hi,
3891 qstats->etherstatsoverrsizepkts_hi,
3892 estats->etherstatsoverrsizepkts_lo,
3893 qstats->etherstatsoverrsizepkts_lo);
3894 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3895 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3896 }
3897
3898 ADD_64(fstats->total_bytes_received_hi,
3899 estats->rx_stat_ifhcinbadoctets_hi,
3900 fstats->total_bytes_received_lo,
3901 estats->rx_stat_ifhcinbadoctets_lo);
bb2a0f7a
YG
3902
3903 memcpy(estats, &(fstats->total_bytes_received_hi),
3904 sizeof(struct host_func_stats) - 2*sizeof(u32));
3905
de832a55
EG
3906 ADD_64(estats->etherstatsoverrsizepkts_hi,
3907 estats->rx_stat_dot3statsframestoolong_hi,
3908 estats->etherstatsoverrsizepkts_lo,
3909 estats->rx_stat_dot3statsframestoolong_lo);
3910 ADD_64(estats->error_bytes_received_hi,
3911 estats->rx_stat_ifhcinbadoctets_hi,
3912 estats->error_bytes_received_lo,
3913 estats->rx_stat_ifhcinbadoctets_lo);
3914
3915 if (bp->port.pmf) {
3916 estats->mac_filter_discard =
3917 le32_to_cpu(tport->mac_filter_discard);
3918 estats->xxoverflow_discard =
3919 le32_to_cpu(tport->xxoverflow_discard);
3920 estats->brb_truncate_discard =
bb2a0f7a 3921 le32_to_cpu(tport->brb_truncate_discard);
de832a55
EG
3922 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3923 }
bb2a0f7a
YG
3924
3925 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 3926
de832a55
EG
3927 bp->stats_pending = 0;
3928
a2fbb9ea
ET
3929 return 0;
3930}
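
A sketch of the per-storm validity handshake used above: the driver stamps every statistics query with a sequence number, and each storm writes back the sequence it finished. A client snapshot is coherent only when the storm's sequence plus one equals the driver's current sequence, the comparison done in 16 bits so wraparound is safe.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool storm_stats_valid(uint16_t storm_seq, uint16_t drv_seq)
{
	return (uint16_t)(storm_seq + 1) == drv_seq;
}

int main(void)
{
	/* works across the 16-bit wrap: 0xffff + 1 == 0x0000 */
	printf("%d %d\n", storm_stats_valid(0xffff, 0x0000),
	       storm_stats_valid(5, 5));
	return 0;
}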
3931
bb2a0f7a 3932static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3933{
bb2a0f7a 3934 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3935 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 3936 int i;
a2fbb9ea
ET
3937
3938 nstats->rx_packets =
3939 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3940 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3941 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3942
3943 nstats->tx_packets =
3944 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3945 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3946 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3947
de832a55 3948 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 3949
0e39e645 3950 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3951
de832a55
EG
3952 nstats->rx_dropped = estats->mac_discard;
3953 for_each_queue(bp, i)
3954 nstats->rx_dropped +=
3955 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3956
a2fbb9ea
ET
3957 nstats->tx_dropped = 0;
3958
3959 nstats->multicast =
de832a55 3960 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 3961
bb2a0f7a 3962 nstats->collisions =
de832a55 3963 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
bb2a0f7a
YG
3964
3965 nstats->rx_length_errors =
de832a55
EG
3966 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3967 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3968 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3969 bnx2x_hilo(&estats->brb_truncate_hi);
3970 nstats->rx_crc_errors =
3971 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3972 nstats->rx_frame_errors =
3973 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3974 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
a2fbb9ea
ET
3975 nstats->rx_missed_errors = estats->xxoverflow_discard;
3976
3977 nstats->rx_errors = nstats->rx_length_errors +
3978 nstats->rx_over_errors +
3979 nstats->rx_crc_errors +
3980 nstats->rx_frame_errors +
0e39e645
ET
3981 nstats->rx_fifo_errors +
3982 nstats->rx_missed_errors;
a2fbb9ea 3983
bb2a0f7a 3984 nstats->tx_aborted_errors =
de832a55
EG
3985 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3986 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3987 nstats->tx_carrier_errors =
3988 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
a2fbb9ea
ET
3989 nstats->tx_fifo_errors = 0;
3990 nstats->tx_heartbeat_errors = 0;
3991 nstats->tx_window_errors = 0;
3992
3993 nstats->tx_errors = nstats->tx_aborted_errors +
de832a55
EG
3994 nstats->tx_carrier_errors +
3995 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3996}
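
bnx2x_hilo() (defined earlier in this file) folds a {hi, lo} counter into an unsigned long for struct net_device_stats; on a 32-bit kernel the high word is simply dropped. A standalone sketch of that fold, using ULONG_MAX in place of the kernel's BITS_PER_LONG test:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static unsigned long hilo(uint32_t hi, uint32_t lo)
{
#if ULONG_MAX > 0xffffffffUL
	return ((unsigned long)hi << 32) | lo;
#else
	(void)hi;		/* high word lost on 32-bit hosts */
	return lo;
#endif
}

int main(void)
{
	printf("%lu\n", hilo(1, 2));
	return 0;
}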
3997
3998static void bnx2x_drv_stats_update(struct bnx2x *bp)
3999{
4000 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4001 int i;
4002
4003 estats->driver_xoff = 0;
4004 estats->rx_err_discard_pkt = 0;
4005 estats->rx_skb_alloc_failed = 0;
4006 estats->hw_csum_err = 0;
4007 for_each_queue(bp, i) {
4008 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4009
4010 estats->driver_xoff += qstats->driver_xoff;
4011 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4012 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4013 estats->hw_csum_err += qstats->hw_csum_err;
4014 }
a2fbb9ea
ET
4015}
4016
bb2a0f7a 4017static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4018{
bb2a0f7a 4019 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4020
bb2a0f7a
YG
4021 if (*stats_comp != DMAE_COMP_VAL)
4022 return;
4023
4024 if (bp->port.pmf)
de832a55 4025 bnx2x_hw_stats_update(bp);
a2fbb9ea 4026
de832a55
EG
4027 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4028 BNX2X_ERR("storm stats not updated for 3 consecutive polls\n");

4029 bnx2x_panic();
4030 return;
a2fbb9ea
ET
4031 }
4032
de832a55
EG
4033 bnx2x_net_stats_update(bp);
4034 bnx2x_drv_stats_update(bp);
4035
a2fbb9ea 4036 if (bp->msglevel & NETIF_MSG_TIMER) {
de832a55
EG
4037 struct tstorm_per_client_stats *old_tclient =
4038 &bp->fp->old_tclient;
4039 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4040 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4041 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4042 int i;
a2fbb9ea
ET
4043
4044 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4045 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4046 " tx pkt (%lx)\n",
4047 bnx2x_tx_avail(bp->fp),
7a9b2557 4048 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
4049 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4050 " rx pkt (%lx)\n",
7a9b2557
VZ
4051 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4052 bp->fp->rx_comp_cons),
4053 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
de832a55
EG
4054 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4055 "brb truncate %u\n",
4056 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4057 qstats->driver_xoff,
4058 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4059 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4060 "packets_too_big_discard %lu no_buff_discard %lu "
a2fbb9ea
ET
4061 "mac_discard %u mac_filter_discard %u "
4062 "xxovrflow_discard %u brb_truncate_discard %u "
4063 "ttl0_discard %u\n",
4781bfad 4064 le32_to_cpu(old_tclient->checksum_discard),
de832a55
EG
4065 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4066 bnx2x_hilo(&qstats->no_buff_discard_hi),
4067 estats->mac_discard, estats->mac_filter_discard,
4068 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4069 le32_to_cpu(old_tclient->ttl0_discard));
a2fbb9ea
ET
4070
4071 for_each_queue(bp, i) {
4072 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4073 bnx2x_fp(bp, i, tx_pkt),
4074 bnx2x_fp(bp, i, rx_pkt),
4075 bnx2x_fp(bp, i, rx_calls));
4076 }
4077 }
4078
bb2a0f7a
YG
4079 bnx2x_hw_stats_post(bp);
4080 bnx2x_storm_stats_post(bp);
4081}
a2fbb9ea 4082
bb2a0f7a
YG
4083static void bnx2x_port_stats_stop(struct bnx2x *bp)
4084{
4085 struct dmae_command *dmae;
4086 u32 opcode;
4087 int loader_idx = PMF_DMAE_C(bp);
4088 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4089
bb2a0f7a 4090 bp->executer_idx = 0;
a2fbb9ea 4091
bb2a0f7a
YG
4092 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4093 DMAE_CMD_C_ENABLE |
4094 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4095#ifdef __BIG_ENDIAN
bb2a0f7a 4096 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4097#else
bb2a0f7a 4098 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4099#endif
bb2a0f7a
YG
4100 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4101 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4102
4103 if (bp->port.port_stx) {
4104
4105 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4106 if (bp->func_stx)
4107 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4108 else
4109 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4110 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4111 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4112 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4113 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
4114 dmae->len = sizeof(struct host_port_stats) >> 2;
4115 if (bp->func_stx) {
4116 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4117 dmae->comp_addr_hi = 0;
4118 dmae->comp_val = 1;
4119 } else {
4120 dmae->comp_addr_lo =
4121 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4122 dmae->comp_addr_hi =
4123 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4124 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4125
bb2a0f7a
YG
4126 *stats_comp = 0;
4127 }
a2fbb9ea
ET
4128 }
4129
bb2a0f7a
YG
4130 if (bp->func_stx) {
4131
4132 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4133 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4134 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4135 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4136 dmae->dst_addr_lo = bp->func_stx >> 2;
4137 dmae->dst_addr_hi = 0;
4138 dmae->len = sizeof(struct host_func_stats) >> 2;
4139 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4140 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4141 dmae->comp_val = DMAE_COMP_VAL;
4142
4143 *stats_comp = 0;
a2fbb9ea 4144 }
bb2a0f7a
YG
4145}
4146
4147static void bnx2x_stats_stop(struct bnx2x *bp)
4148{
4149 int update = 0;
4150
4151 bnx2x_stats_comp(bp);
4152
4153 if (bp->port.pmf)
4154 update = (bnx2x_hw_stats_update(bp) == 0);
4155
4156 update |= (bnx2x_storm_stats_update(bp) == 0);
4157
4158 if (update) {
4159 bnx2x_net_stats_update(bp);
a2fbb9ea 4160
bb2a0f7a
YG
4161 if (bp->port.pmf)
4162 bnx2x_port_stats_stop(bp);
4163
4164 bnx2x_hw_stats_post(bp);
4165 bnx2x_stats_comp(bp);
a2fbb9ea
ET
4166 }
4167}
4168
bb2a0f7a
YG
4169static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4170{
4171}
4172
4173static const struct {
4174 void (*action)(struct bnx2x *bp);
4175 enum bnx2x_stats_state next_state;
4176} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4177/* state event */
4178{
4179/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4180/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4181/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4182/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4183},
4184{
4185/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4186/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4187/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4188/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4189}
4190};
4191
4192static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4193{
4194 enum bnx2x_stats_state state = bp->stats_state;
4195
4196 bnx2x_stats_stm[state][event].action(bp);
4197 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4198
4199 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4200 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4201 state, event, bp->stats_state);
4202}
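
A sketch of the table-driven statistics state machine above: each (state, event) cell names an action and the next state, and dispatch is a double array lookup. The states, events, and stub actions here are illustrative stand-ins for bnx2x_stats_stm.

#include <stdio.h>

enum state { ST_DISABLED, ST_ENABLED, ST_MAX };
enum event { EV_PMF, EV_LINK_UP, EV_UPDATE, EV_STOP, EV_MAX };

static void nop(void)   { }
static void start(void) { puts("start collection"); }
static void stop(void)  { puts("stop collection"); }

static const struct { void (*action)(void); enum state next; }
stm[ST_MAX][EV_MAX] = {
	[ST_DISABLED] = {
		[EV_PMF]     = { nop,   ST_DISABLED },
		[EV_LINK_UP] = { start, ST_ENABLED },
		[EV_UPDATE]  = { nop,   ST_DISABLED },
		[EV_STOP]    = { nop,   ST_DISABLED },
	},
	[ST_ENABLED] = {
		[EV_PMF]     = { start, ST_ENABLED },
		[EV_LINK_UP] = { start, ST_ENABLED },
		[EV_UPDATE]  = { nop,   ST_ENABLED },
		[EV_STOP]    = { stop,  ST_DISABLED },
	},
};

int main(void)
{
	enum state s = ST_DISABLED;
	enum event ev = EV_LINK_UP;

	stm[s][ev].action();	/* run the cell's action ... */
	s = stm[s][ev].next;	/* ... then take its transition */
	printf("state now %d\n", s);
	return 0;
}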
4203
a2fbb9ea
ET
4204static void bnx2x_timer(unsigned long data)
4205{
4206 struct bnx2x *bp = (struct bnx2x *) data;
4207
4208 if (!netif_running(bp->dev))
4209 return;
4210
4211 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4212 goto timer_restart;
a2fbb9ea
ET
4213
4214 if (poll) {
4215 struct bnx2x_fastpath *fp = &bp->fp[0];
4216 int rc;
4217
7961f791 4218 bnx2x_tx_int(fp);
a2fbb9ea
ET
4219 rc = bnx2x_rx_int(fp, 1000);
4220 }
4221
34f80b04
EG
4222 if (!BP_NOMCP(bp)) {
4223 int func = BP_FUNC(bp);
a2fbb9ea
ET
4224 u32 drv_pulse;
4225 u32 mcp_pulse;
4226
4227 ++bp->fw_drv_pulse_wr_seq;
4228 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4229 /* TBD - add SYSTEM_TIME */
4230 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4231 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4232
34f80b04 4233 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
4234 MCP_PULSE_SEQ_MASK);
4235 /* The delta between driver pulse and mcp response
4236 * should be 1 (before mcp response) or 0 (after mcp response)
4237 */
4238 if ((drv_pulse != mcp_pulse) &&
4239 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4240 /* someone lost a heartbeat... */
4241 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4242 drv_pulse, mcp_pulse);
4243 }
4244 }
4245
bb2a0f7a
YG
4246 if ((bp->state == BNX2X_STATE_OPEN) ||
4247 (bp->state == BNX2X_STATE_DISABLED))
4248 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4249
f1410647 4250timer_restart:
a2fbb9ea
ET
4251 mod_timer(&bp->timer, jiffies + bp->current_interval);
4252}
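
A sketch of the driver/MCP heartbeat rule encoded above: both sides run free-running sequence counters, and the link is healthy when the driver's pulse equals the MCP echo or leads it by exactly one, modulo the sequence mask. The mask value below is illustrative, standing in for MCP_PULSE_SEQ_MASK.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PULSE_SEQ_MASK 0x7fffu	/* illustrative mask */

static bool heartbeat_ok(uint32_t drv_pulse, uint32_t mcp_pulse)
{
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & PULSE_SEQ_MASK));
}

int main(void)
{
	printf("%d %d %d\n",
	       heartbeat_ok(10, 10),	/* after MCP response */
	       heartbeat_ok(10, 9),	/* before MCP response */
	       heartbeat_ok(10, 7));	/* someone lost a heartbeat */
	return 0;
}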
4253
4254/* end of Statistics */
4255
4256/* nic init */
4257
4258/*
4259 * nic init service functions
4260 */
4261
34f80b04 4262static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4263{
34f80b04
EG
4264 int port = BP_PORT(bp);
4265
490c3c9b 4266 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
34f80b04 4267 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4268 sizeof(struct ustorm_status_block)/4);
490c3c9b 4269 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
34f80b04 4270 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4271 sizeof(struct cstorm_status_block)/4);
34f80b04
EG
4272}
4273
5c862848
EG
4274static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4275 dma_addr_t mapping, int sb_id)
34f80b04
EG
4276{
4277 int port = BP_PORT(bp);
bb2a0f7a 4278 int func = BP_FUNC(bp);
a2fbb9ea 4279 int index;
34f80b04 4280 u64 section;
a2fbb9ea
ET
4281
4282 /* USTORM */
4283 section = ((u64)mapping) + offsetof(struct host_status_block,
4284 u_status_block);
34f80b04 4285 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4286
4287 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4288 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4289 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4290 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4291 U64_HI(section));
bb2a0f7a
YG
4292 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4293 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4294
4295 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4296 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4297 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4298
4299 /* CSTORM */
4300 section = ((u64)mapping) + offsetof(struct host_status_block,
4301 c_status_block);
34f80b04 4302 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4303
4304 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4305 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4306 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4307 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4308 U64_HI(section));
7a9b2557
VZ
4309 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4310 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4311
4312 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4313 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04
EG
4314 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4315
4316 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4317}
4318
4319static void bnx2x_zero_def_sb(struct bnx2x *bp)
4320{
4321 int func = BP_FUNC(bp);
a2fbb9ea 4322
490c3c9b
EG
4323 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4324 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4325 sizeof(struct tstorm_def_status_block)/4);
4326 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
34f80b04
EG
4327 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4328 sizeof(struct ustorm_def_status_block)/4);
490c3c9b 4329 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
34f80b04
EG
4330 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4331 sizeof(struct cstorm_def_status_block)/4);
490c3c9b 4332 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
34f80b04
EG
4333 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4334 sizeof(struct xstorm_def_status_block)/4);
a2fbb9ea
ET
4335}
4336
4337static void bnx2x_init_def_sb(struct bnx2x *bp,
4338 struct host_def_status_block *def_sb,
34f80b04 4339 dma_addr_t mapping, int sb_id)
a2fbb9ea 4340{
34f80b04
EG
4341 int port = BP_PORT(bp);
4342 int func = BP_FUNC(bp);
a2fbb9ea
ET
4343 int index, val, reg_offset;
4344 u64 section;
4345
4346 /* ATTN */
4347 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4348 atten_status_block);
34f80b04 4349 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4350
49d66772
ET
4351 bp->attn_state = 0;
4352
a2fbb9ea
ET
4353 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4354 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4355
34f80b04 4356 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4357 bp->attn_group[index].sig[0] = REG_RD(bp,
4358 reg_offset + 0x10*index);
4359 bp->attn_group[index].sig[1] = REG_RD(bp,
4360 reg_offset + 0x4 + 0x10*index);
4361 bp->attn_group[index].sig[2] = REG_RD(bp,
4362 reg_offset + 0x8 + 0x10*index);
4363 bp->attn_group[index].sig[3] = REG_RD(bp,
4364 reg_offset + 0xc + 0x10*index);
4365 }
4366
a2fbb9ea
ET
4367 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4368 HC_REG_ATTN_MSG0_ADDR_L);
4369
4370 REG_WR(bp, reg_offset, U64_LO(section));
4371 REG_WR(bp, reg_offset + 4, U64_HI(section));
4372
4373 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4374
4375 val = REG_RD(bp, reg_offset);
34f80b04 4376 val |= sb_id;
a2fbb9ea
ET
4377 REG_WR(bp, reg_offset, val);
4378
4379 /* USTORM */
4380 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4381 u_def_status_block);
34f80b04 4382 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4383
4384 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4385 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4386 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4387 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4388 U64_HI(section));
5c862848 4389 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4390 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4391
4392 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4393 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4394 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4395
4396 /* CSTORM */
4397 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4398 c_def_status_block);
34f80b04 4399 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4400
4401 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4402 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4403 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4404 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4405 U64_HI(section));
5c862848 4406 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4407 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4408
4409 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4410 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4411 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4412
4413 /* TSTORM */
4414 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4415 t_def_status_block);
34f80b04 4416 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4417
4418 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4419 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4420 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4421 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4422 U64_HI(section));
5c862848 4423 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4424 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4425
4426 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4427 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4428 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4429
4430 /* XSTORM */
4431 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4432 x_def_status_block);
34f80b04 4433 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4434
4435 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4436 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4437 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4438 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4439 U64_HI(section));
5c862848 4440 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4441 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4442
4443 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4444 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4445 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4446
bb2a0f7a 4447 bp->stats_pending = 0;
66e855f3 4448 bp->set_mac_pending = 0;
bb2a0f7a 4449
34f80b04 4450 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4451}
4452
4453static void bnx2x_update_coalesce(struct bnx2x *bp)
4454{
34f80b04 4455 int port = BP_PORT(bp);
a2fbb9ea
ET
4456 int i;
4457
4458 for_each_queue(bp, i) {
34f80b04 4459 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4460
4461 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4462 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4463 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4464 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4465 bp->rx_ticks/12);
a2fbb9ea 4466 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4467 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4468 U_SB_ETH_RX_CQ_INDEX),
3799cf47 4469 (bp->rx_ticks/12) ? 0 : 1);
a2fbb9ea
ET
4470
4471 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4472 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4473 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4474 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4475 bp->tx_ticks/12);
a2fbb9ea 4476 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4477 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4478 C_SB_ETH_TX_CQ_INDEX),
3799cf47 4479 (bp->tx_ticks/12) ? 0 : 1);
a2fbb9ea
ET
4480 }
4481}
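
A sketch of the coalescing programming above: the tick values are scaled into host-coalescing timeout units (the /12 suggests one unit is 12 microseconds, an assumption here), and an index whose scaled timeout comes out zero is marked disabled rather than armed.

#include <stdint.h>
#include <stdio.h>

struct hc_index { uint8_t timeout; uint16_t disabled; };

static struct hc_index hc_program(uint32_t ticks)
{
	struct hc_index idx;

	idx.timeout  = (uint8_t)(ticks / 12);	/* assumed 12 us units */
	idx.disabled = (ticks / 12) ? 0 : 1;	/* zero => no coalescing */
	return idx;
}

int main(void)
{
	struct hc_index rx = hc_program(50), off = hc_program(0);

	printf("rx: timeout=%u disabled=%u; off: disabled=%u\n",
	       rx.timeout, rx.disabled, off.disabled);
	return 0;
}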
4482
7a9b2557
VZ
4483static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4484 struct bnx2x_fastpath *fp, int last)
4485{
4486 int i;
4487
4488 for (i = 0; i < last; i++) {
4489 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4490 struct sk_buff *skb = rx_buf->skb;
4491
4492 if (skb == NULL) {
4493 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4494 continue;
4495 }
4496
4497 if (fp->tpa_state[i] == BNX2X_TPA_START)
4498 pci_unmap_single(bp->pdev,
4499 pci_unmap_addr(rx_buf, mapping),
356e2385 4500 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
4501
4502 dev_kfree_skb(skb);
4503 rx_buf->skb = NULL;
4504 }
4505}
4506
a2fbb9ea
ET
4507static void bnx2x_init_rx_rings(struct bnx2x *bp)
4508{
7a9b2557 4509 int func = BP_FUNC(bp);
32626230
EG
4510 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4511 ETH_MAX_AGGREGATION_QUEUES_E1H;
4512 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4513 int i, j;
a2fbb9ea 4514
87942b46 4515 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4516 DP(NETIF_MSG_IFUP,
4517 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4518
7a9b2557 4519 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4520
555f6c78 4521 for_each_rx_queue(bp, j) {
32626230 4522 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4523
32626230 4524 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4525 fp->tpa_pool[i].skb =
4526 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4527 if (!fp->tpa_pool[i].skb) {
4528 BNX2X_ERR("Failed to allocate TPA "
4529 "skb pool for queue[%d] - "
4530 "disabling TPA on this "
4531 "queue!\n", j);
4532 bnx2x_free_tpa_pool(bp, fp, i);
4533 fp->disable_tpa = 1;
4534 break;
4535 }
4536 pci_unmap_addr_set((struct sw_rx_bd *)
4537 &fp->tpa_pool[i], /* this queue's pool, not bp->fp */
4538 mapping, 0);
4539 fp->tpa_state[i] = BNX2X_TPA_STOP;
4540 }
4541 }
4542 }
4543
555f6c78 4544 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
4545 struct bnx2x_fastpath *fp = &bp->fp[j];
4546
4547 fp->rx_bd_cons = 0;
4548 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4549 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4550
4551 /* "next page" elements initialization */
4552 /* SGE ring */
4553 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4554 struct eth_rx_sge *sge;
4555
4556 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4557 sge->addr_hi =
4558 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4559 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4560 sge->addr_lo =
4561 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4562 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4563 }
4564
4565 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4566
7a9b2557 4567 /* RX BD ring */
a2fbb9ea
ET
4568 for (i = 1; i <= NUM_RX_RINGS; i++) {
4569 struct eth_rx_bd *rx_bd;
4570
4571 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4572 rx_bd->addr_hi =
4573 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4574 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4575 rx_bd->addr_lo =
4576 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4577 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4578 }
4579
34f80b04 4580 /* CQ ring */
a2fbb9ea
ET
4581 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4582 struct eth_rx_cqe_next_page *nextpg;
4583
4584 nextpg = (struct eth_rx_cqe_next_page *)
4585 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4586 nextpg->addr_hi =
4587 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4588 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4589 nextpg->addr_lo =
4590 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4591 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4592 }
4593
7a9b2557
VZ
4594 /* Allocate SGEs and initialize the ring elements */
4595 for (i = 0, ring_prod = 0;
4596 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4597
7a9b2557
VZ
4598 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4599 BNX2X_ERR("was only able to allocate "
4600 "%d rx sges\n", i);
4601 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4602 /* Cleanup already allocated elements */
4603 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4604 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4605 fp->disable_tpa = 1;
4606 ring_prod = 0;
4607 break;
4608 }
4609 ring_prod = NEXT_SGE_IDX(ring_prod);
4610 }
4611 fp->rx_sge_prod = ring_prod;
4612
4613 /* Allocate BDs and initialize BD ring */
66e855f3 4614 fp->rx_comp_cons = 0;
7a9b2557 4615 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4616 for (i = 0; i < bp->rx_ring_size; i++) {
4617 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4618 BNX2X_ERR("was only able to allocate "
de832a55
EG
4619 "%d rx skbs on queue[%d]\n", i, j);
4620 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
4621 break;
4622 }
4623 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4624 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4625 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
4626 }
4627
7a9b2557
VZ
4628 fp->rx_bd_prod = ring_prod;
4629 /* must not have more available CQEs than BDs */
4630 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4631 cqe_ring_prod);
a2fbb9ea
ET
4632 fp->rx_pkt = fp->rx_calls = 0;
4633
7a9b2557
VZ
4634 /* Warning!
4635 * this will generate an interrupt (to the TSTORM);
4636 * it must only be done after the chip is initialized
4637 */
4638 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4639 fp->rx_sge_prod);
a2fbb9ea
ET
4640 if (j != 0)
4641 continue;
4642
4643 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4644 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
4645 U64_LO(fp->rx_comp_mapping));
4646 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4647 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
4648 U64_HI(fp->rx_comp_mapping));
4649 }
4650}
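
A sketch of the "next page" chaining set up for the SGE, BD and CQ rings above: the tail slot(s) of each page are reserved for a record holding the bus address of the following page, so NUM_PAGES pages act as one circular ring (the modulo makes the last page point back to the first). Sizes and the two-word pointer record are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_BYTES	4096u
#define SLOT_BYTES	8u
#define SLOTS_PER_PAGE	(PAGE_BYTES / SLOT_BYTES)
#define NUM_PAGES	4u

struct next_page_ptr { uint32_t addr_hi, addr_lo; };

static void chain_ring_pages(uint8_t pages[][PAGE_BYTES], uint64_t dma_base)
{
	unsigned int i;

	for (i = 1; i <= NUM_PAGES; i++) {
		struct next_page_ptr np;
		uint64_t next = dma_base + PAGE_BYTES * (i % NUM_PAGES);

		np.addr_hi = (uint32_t)(next >> 32);
		np.addr_lo = (uint32_t)next;
		/* write the link into the last slot of page i-1 */
		memcpy(&pages[i - 1][(SLOTS_PER_PAGE - 1) * SLOT_BYTES],
		       &np, sizeof(np));
	}
}

int main(void)
{
	static uint8_t pages[NUM_PAGES][PAGE_BYTES];

	chain_ring_pages(pages, 0x100000000ULL);
	puts("pages chained into one ring");
	return 0;
}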
4651
4652static void bnx2x_init_tx_ring(struct bnx2x *bp)
4653{
4654 int i, j;
4655
555f6c78 4656 for_each_tx_queue(bp, j) {
a2fbb9ea
ET
4657 struct bnx2x_fastpath *fp = &bp->fp[j];
4658
4659 for (i = 1; i <= NUM_TX_RINGS; i++) {
4660 struct eth_tx_bd *tx_bd =
4661 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4662
4663 tx_bd->addr_hi =
4664 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4665 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4666 tx_bd->addr_lo =
4667 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4668 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4669 }
4670
4671 fp->tx_pkt_prod = 0;
4672 fp->tx_pkt_cons = 0;
4673 fp->tx_bd_prod = 0;
4674 fp->tx_bd_cons = 0;
4675 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4676 fp->tx_pkt = 0;
4677 }
4678}
4679
4680static void bnx2x_init_sp_ring(struct bnx2x *bp)
4681{
34f80b04 4682 int func = BP_FUNC(bp);
a2fbb9ea
ET
4683
4684 spin_lock_init(&bp->spq_lock);
4685
4686 bp->spq_left = MAX_SPQ_PENDING;
4687 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4688 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4689 bp->spq_prod_bd = bp->spq;
4690 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4691
34f80b04 4692 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4693 U64_LO(bp->spq_mapping));
34f80b04
EG
4694 REG_WR(bp,
4695 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4696 U64_HI(bp->spq_mapping));
4697
34f80b04 4698 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4699 bp->spq_prod_idx);
4700}
4701
4702static void bnx2x_init_context(struct bnx2x *bp)
4703{
4704 int i;
4705
4706 for_each_queue(bp, i) {
4707 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4708 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 4709 u8 cl_id = fp->cl_id;
0626b899 4710 u8 sb_id = fp->sb_id;
a2fbb9ea 4711
34f80b04
EG
4712 context->ustorm_st_context.common.sb_index_numbers =
4713 BNX2X_RX_SB_INDEX_NUM;
0626b899 4714 context->ustorm_st_context.common.clientId = cl_id;
34f80b04
EG
4715 context->ustorm_st_context.common.status_block_id = sb_id;
4716 context->ustorm_st_context.common.flags =
de832a55
EG
4717 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4718 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4719 context->ustorm_st_context.common.statistics_counter_id =
4720 cl_id;
8d9c5f34 4721 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 4722 BNX2X_RX_ALIGN_SHIFT;
34f80b04 4723 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4724 bp->rx_buf_size;
34f80b04 4725 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4726 U64_HI(fp->rx_desc_mapping);
34f80b04 4727 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4728 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4729 if (!fp->disable_tpa) {
4730 context->ustorm_st_context.common.flags |=
4731 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4732 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4733 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
4734 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4735 (u32)0xffff);
7a9b2557
VZ
4736 context->ustorm_st_context.common.sge_page_base_hi =
4737 U64_HI(fp->rx_sge_mapping);
4738 context->ustorm_st_context.common.sge_page_base_lo =
4739 U64_LO(fp->rx_sge_mapping);
4740 }
4741
8d9c5f34
EG
4742 context->ustorm_ag_context.cdu_usage =
4743 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4744 CDU_REGION_NUMBER_UCM_AG,
4745 ETH_CONNECTION_TYPE);
4746
4747 context->xstorm_st_context.tx_bd_page_base_hi =
4748 U64_HI(fp->tx_desc_mapping);
4749 context->xstorm_st_context.tx_bd_page_base_lo =
4750 U64_LO(fp->tx_desc_mapping);
4751 context->xstorm_st_context.db_data_addr_hi =
4752 U64_HI(fp->tx_prods_mapping);
4753 context->xstorm_st_context.db_data_addr_lo =
4754 U64_LO(fp->tx_prods_mapping);
0626b899 4755 context->xstorm_st_context.statistics_data = (cl_id |
8d9c5f34 4756 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea 4757 context->cstorm_st_context.sb_index_number =
5c862848 4758 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4759 context->cstorm_st_context.status_block_id = sb_id;
a2fbb9ea
ET
4760
4761 context->xstorm_ag_context.cdu_reserved =
4762 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4763 CDU_REGION_NUMBER_XCM_AG,
4764 ETH_CONNECTION_TYPE);
a2fbb9ea
ET
4765 }
4766}
4767
4768static void bnx2x_init_ind_table(struct bnx2x *bp)
4769{
26c8fa4d 4770 int func = BP_FUNC(bp);
a2fbb9ea
ET
4771 int i;
4772
555f6c78 4773 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
4774 return;
4775
555f6c78
EG
4776 DP(NETIF_MSG_IFUP,
4777 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4778 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4779 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4780 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
0626b899 4781 bp->fp->cl_id + (i % bp->num_rx_queues));
a2fbb9ea
ET
4782}
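
A sketch of the RSS indirection fill above: every hash bucket in a fixed-size table is mapped to a client id by round robin over the active RX queues, starting at the leading client. TABLE_SIZE stands in for TSTORM_INDIRECTION_TABLE_SIZE.

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 128	/* illustrative size */

static void fill_ind_table(uint8_t *tbl, uint8_t base_cl_id,
			   int num_rx_queues)
{
	int i;

	for (i = 0; i < TABLE_SIZE; i++)
		tbl[i] = base_cl_id + (uint8_t)(i % num_rx_queues);
}

int main(void)
{
	uint8_t tbl[TABLE_SIZE];

	fill_ind_table(tbl, 16, 4);
	printf("bucket 5 -> client %u\n", tbl[5]);	/* 16 + (5 % 4) */
	return 0;
}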
4783
49d66772
ET
4784static void bnx2x_set_client_config(struct bnx2x *bp)
4785{
49d66772 4786 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4787 int port = BP_PORT(bp);
4788 int i;
49d66772 4789
e7799c5f 4790 tstorm_client.mtu = bp->dev->mtu;
49d66772 4791 tstorm_client.config_flags =
de832a55
EG
4792 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4793 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 4794#ifdef BCM_VLAN
0c6671b0 4795 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 4796 tstorm_client.config_flags |=
8d9c5f34 4797 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
4798 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4799 }
4800#endif
49d66772 4801
7a9b2557
VZ
4802 if (bp->flags & TPA_ENABLE_FLAG) {
4803 tstorm_client.max_sges_for_packet =
4f40f2cb 4804 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
4805 tstorm_client.max_sges_for_packet =
4806 ((tstorm_client.max_sges_for_packet +
4807 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4808 PAGES_PER_SGE_SHIFT;
4809
4810 tstorm_client.config_flags |=
4811 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4812 }
4813
49d66772 4814 for_each_queue(bp, i) {
de832a55
EG
4815 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4816
49d66772 4817 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4818 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4819 ((u32 *)&tstorm_client)[0]);
4820 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4821 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4822 ((u32 *)&tstorm_client)[1]);
4823 }
4824
34f80b04
EG
4825 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4826 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4827}
4828
a2fbb9ea
ET
4829static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4830{
a2fbb9ea 4831 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4832 int mode = bp->rx_mode;
4833 int mask = (1 << BP_L_ID(bp));
4834 int func = BP_FUNC(bp);
581ce43d 4835 int port = BP_PORT(bp);
a2fbb9ea 4836 int i;
581ce43d
EG
4837 /* All but management unicast packets should pass to the host as well */
4838 u32 llh_mask =
4839 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4840 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4841 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4842 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 4843
3196a88a 4844 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
4845
4846 switch (mode) {
4847 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4848 tstorm_mac_filter.ucast_drop_all = mask;
4849 tstorm_mac_filter.mcast_drop_all = mask;
4850 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 4851 break;
356e2385 4852
a2fbb9ea 4853 case BNX2X_RX_MODE_NORMAL:
34f80b04 4854 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4855 break;
356e2385 4856
a2fbb9ea 4857 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4858 tstorm_mac_filter.mcast_accept_all = mask;
4859 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4860 break;
356e2385 4861
a2fbb9ea 4862 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4863 tstorm_mac_filter.ucast_accept_all = mask;
4864 tstorm_mac_filter.mcast_accept_all = mask;
4865 tstorm_mac_filter.bcast_accept_all = mask;
581ce43d
EG
4866 /* pass management unicast packets as well */
4867 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 4868 break;
356e2385 4869
a2fbb9ea 4870 default:
34f80b04
EG
4871 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4872 break;
a2fbb9ea
ET
4873 }
4874
581ce43d
EG
4875 REG_WR(bp,
4876 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
4877 llh_mask);
4878
a2fbb9ea
ET
4879 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4880 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4881 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4882 ((u32 *)&tstorm_mac_filter)[i]);
4883
34f80b04 4884/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4885 ((u32 *)&tstorm_mac_filter)[i]); */
4886 }
a2fbb9ea 4887
49d66772
ET
4888 if (mode != BNX2X_RX_MODE_NONE)
4889 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4890}
4891
471de716
EG
4892static void bnx2x_init_internal_common(struct bnx2x *bp)
4893{
4894 int i;
4895
3cdf1db7
YG
4896 if (bp->flags & TPA_ENABLE_FLAG) {
4897 struct tstorm_eth_tpa_exist tpa = {0};
4898
4899 tpa.tpa_exist = 1;
4900
4901 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4902 ((u32 *)&tpa)[0]);
4903 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4904 ((u32 *)&tpa)[1]);
4905 }
4906
471de716
EG
4907 /* Zero this manually as its initialization is
4908 currently missing in the initTool */
4909 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4910 REG_WR(bp, BAR_USTRORM_INTMEM +
4911 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4912}
4913
4914static void bnx2x_init_internal_port(struct bnx2x *bp)
4915{
4916 int port = BP_PORT(bp);
4917
4918 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4919 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4920 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4921 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4922}
4923
8a1c38d1
EG
4924/* Calculates the sum of vn_min_rates.
4925 It's needed for further normalizing of the min_rates.
4926 Returns:
4927 sum of vn_min_rates.
4928 or
4929 0 - if all the min_rates are 0.
4930 In the latter case the fairness algorithm should be deactivated.
4931 If not all min_rates are zero then those that are zeroes will be set to 1.
4932 */
4933static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4934{
4935 int all_zero = 1;
4936 int port = BP_PORT(bp);
4937 int vn;
4938
4939 bp->vn_weight_sum = 0;
4940 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4941 int func = 2*vn + port;
4942 u32 vn_cfg =
4943 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4944 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4945 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4946
4947 /* Skip hidden VNs */
4948 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4949 continue;
4950
4951 /* If min rate is zero - set it to 1 */
4952 if (!vn_min_rate)
4953 vn_min_rate = DEF_MIN_RATE;
4954 else
4955 all_zero = 0;
4956
4957 bp->vn_weight_sum += vn_min_rate;
4958 }
4959
4960 /* ... only if all min rates are zeros - disable fairness */
4961 if (all_zero)
4962 bp->vn_weight_sum = 0;
4963}
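/*
 * Editor's sketch (illustrative only, not part of the driver): the
 * weight-sum rule above, modelled on a plain array.  Zero min rates are
 * promoted to a default (DEF_MIN_RATE in the driver) so every VN gets
 * some bandwidth, but fairness is disabled (sum == 0) only when all
 * configured rates were zero; the hidden-function skip is omitted here.
 */
#if 0 /* example only */
static u32 vn_weight_sum(const u32 *min_rate, int nvn, u32 def_min_rate)
{
	int all_zero = 1, vn;
	u32 sum = 0;

	for (vn = 0; vn < nvn; vn++) {
		if (min_rate[vn])
			all_zero = 0;
		sum += min_rate[vn] ? min_rate[vn] : def_min_rate;
	}
	return all_zero ? 0 : sum;
}
#endif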
4964
471de716 4965static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4966{
a2fbb9ea
ET
4967 struct tstorm_eth_function_common_config tstorm_config = {0};
4968 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4969 int port = BP_PORT(bp);
4970 int func = BP_FUNC(bp);
de832a55
EG
4971 int i, j;
4972 u32 offset;
471de716 4973 u16 max_agg_size;
a2fbb9ea
ET
4974
4975 if (is_multi(bp)) {
555f6c78 4976 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4977 tstorm_config.rss_result_mask = MULTI_MASK;
4978 }
8d9c5f34
EG
4979 if (IS_E1HMF(bp))
4980 tstorm_config.config_flags |=
4981 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4982
34f80b04
EG
4983 tstorm_config.leading_client_id = BP_L_ID(bp);
4984
a2fbb9ea 4985 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4986 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4987 (*(u32 *)&tstorm_config));
4988
c14423fe 4989 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4990 bnx2x_set_storm_rx_mode(bp);
4991
de832a55
EG
4992 for_each_queue(bp, i) {
4993 u8 cl_id = bp->fp[i].cl_id;
4994
4995 /* reset xstorm per client statistics */
4996 offset = BAR_XSTRORM_INTMEM +
4997 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4998 for (j = 0;
4999 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5000 REG_WR(bp, offset + j*4, 0);
5001
5002 /* reset tstorm per client statistics */
5003 offset = BAR_TSTRORM_INTMEM +
5004 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5005 for (j = 0;
5006 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5007 REG_WR(bp, offset + j*4, 0);
5008
5009 /* reset ustorm per client statistics */
5010 offset = BAR_USTRORM_INTMEM +
5011 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5012 for (j = 0;
5013 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5014 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
5015 }
5016
5017 /* Init statistics related context */
34f80b04 5018 stats_flags.collect_eth = 1;
a2fbb9ea 5019
66e855f3 5020 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5021 ((u32 *)&stats_flags)[0]);
66e855f3 5022 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5023 ((u32 *)&stats_flags)[1]);
5024
66e855f3 5025 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5026 ((u32 *)&stats_flags)[0]);
66e855f3 5027 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5028 ((u32 *)&stats_flags)[1]);
5029
de832a55
EG
5030 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5031 ((u32 *)&stats_flags)[0]);
5032 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5033 ((u32 *)&stats_flags)[1]);
5034
66e855f3 5035 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5036 ((u32 *)&stats_flags)[0]);
66e855f3 5037 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5038 ((u32 *)&stats_flags)[1]);
5039
66e855f3
YG
5040 REG_WR(bp, BAR_XSTRORM_INTMEM +
5041 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5042 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5043 REG_WR(bp, BAR_XSTRORM_INTMEM +
5044 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5045 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5046
5047 REG_WR(bp, BAR_TSTRORM_INTMEM +
5048 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5049 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5050 REG_WR(bp, BAR_TSTRORM_INTMEM +
5051 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5052 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5053
de832a55
EG
5054 REG_WR(bp, BAR_USTRORM_INTMEM +
5055 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5056 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5057 REG_WR(bp, BAR_USTRORM_INTMEM +
5058 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5059 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5060
34f80b04
EG
5061 if (CHIP_IS_E1H(bp)) {
5062 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5063 IS_E1HMF(bp));
5064 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5065 IS_E1HMF(bp));
5066 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5067 IS_E1HMF(bp));
5068 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5069 IS_E1HMF(bp));
5070
7a9b2557
VZ
5071 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5072 bp->e1hov);
34f80b04
EG
5073 }
5074
4f40f2cb
EG
5075 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5076 max_agg_size =
5077 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5078 SGE_PAGE_SIZE * PAGES_PER_SGE),
5079 (u32)0xffff);
555f6c78 5080 for_each_rx_queue(bp, i) {
7a9b2557 5081 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5082
5083 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5084 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5085 U64_LO(fp->rx_comp_mapping));
5086 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5087 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5088 U64_HI(fp->rx_comp_mapping));
5089
7a9b2557 5090 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5091 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5092 max_agg_size);
5093 }
8a1c38d1 5094
1c06328c
EG
5095 /* dropless flow control */
5096 if (CHIP_IS_E1H(bp)) {
5097 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5098
5099 rx_pause.bd_thr_low = 250;
5100 rx_pause.cqe_thr_low = 250;
5101 rx_pause.cos = 1;
5102 rx_pause.sge_thr_low = 0;
5103 rx_pause.bd_thr_high = 350;
5104 rx_pause.cqe_thr_high = 350;
5105 rx_pause.sge_thr_high = 0;
5106
5107 for_each_rx_queue(bp, i) {
5108 struct bnx2x_fastpath *fp = &bp->fp[i];
5109
5110 if (!fp->disable_tpa) {
5111 rx_pause.sge_thr_low = 150;
5112 rx_pause.sge_thr_high = 250;
5113 }
5114
5115
5116 offset = BAR_USTRORM_INTMEM +
5117 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5118 fp->cl_id);
5119 for (j = 0;
5120 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5121 j++)
5122 REG_WR(bp, offset + j*4,
5123 ((u32 *)&rx_pause)[j]);
5124 }
5125 }
5126
8a1c38d1
EG
5127 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5128
5129 /* Init rate shaping and fairness contexts */
5130 if (IS_E1HMF(bp)) {
5131 int vn;
5132
5133 /* During init there is no active link
5134 Until link is up, set link rate to 10Gbps */
5135 bp->link_vars.line_speed = SPEED_10000;
5136 bnx2x_init_port_minmax(bp);
5137
5138 bnx2x_calc_vn_weight_sum(bp);
5139
5140 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5141 bnx2x_init_vn_minmax(bp, 2*vn + port);
5142
5143 /* Enable rate shaping and fairness */
5144 bp->cmng.flags.cmng_enables =
5145 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5146 if (bp->vn_weight_sum)
5147 bp->cmng.flags.cmng_enables |=
5148 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5149 else
5150 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
5151 " fairness will be disabled\n");
5152 } else {
5153 /* rate shaping and fairness are disabled */
5154 DP(NETIF_MSG_IFUP,
5155 "single function mode minmax will be disabled\n");
5156 }
5157
5158
5159 /* Store it to internal memory */
5160 if (bp->port.pmf)
5161 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5162 REG_WR(bp, BAR_XSTRORM_INTMEM +
5163 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5164 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5165}
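/*
 * Editor's sketch (illustrative only, not part of the driver): the
 * max_agg_size clamp above.  The FW allows at most 8 fragments per
 * aggregation (further limited by MAX_SKB_FRAGS), each covering
 * SGE_PAGE_SIZE * PAGES_PER_SGE bytes, and the result must fit the u16
 * field written via REG_WR16; the page constants below are assumptions.
 */
#if 0 /* example only */
static u16 calc_max_agg_size(u32 max_frags, u32 sge_page_size,
			     u32 pages_per_sge)
{
	u32 size = max_frags * sge_page_size * pages_per_sge;

	/* e.g. 8 * 4096 * 2 = 64KiB saturates to 0xffff */
	return (u16)(size > 0xffff ? 0xffff : size);
}
#endif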
5166
471de716
EG
5167static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5168{
5169 switch (load_code) {
5170 case FW_MSG_CODE_DRV_LOAD_COMMON:
5171 bnx2x_init_internal_common(bp);
5172 /* no break */
5173
5174 case FW_MSG_CODE_DRV_LOAD_PORT:
5175 bnx2x_init_internal_port(bp);
5176 /* no break */
5177
5178 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5179 bnx2x_init_internal_func(bp);
5180 break;
5181
5182 default:
5183 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5184 break;
5185 }
5186}
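/*
 * Editor's sketch (illustrative only, not part of the driver): the switch
 * above relies on deliberate fall-through, so a COMMON load also performs
 * the PORT and FUNCTION stages.  The same containment written out
 * explicitly:
 */
#if 0 /* example only */
static void init_internal_explicit(struct bnx2x *bp, u32 load_code)
{
	int common = (load_code == FW_MSG_CODE_DRV_LOAD_COMMON);
	int port = common || (load_code == FW_MSG_CODE_DRV_LOAD_PORT);
	int func = port || (load_code == FW_MSG_CODE_DRV_LOAD_FUNCTION);

	if (common)
		bnx2x_init_internal_common(bp);
	if (port)
		bnx2x_init_internal_port(bp);
	if (func)
		bnx2x_init_internal_func(bp);
}
#endif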
5187
5188static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5189{
5190 int i;
5191
5192 for_each_queue(bp, i) {
5193 struct bnx2x_fastpath *fp = &bp->fp[i];
5194
34f80b04 5195 fp->bp = bp;
a2fbb9ea 5196 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5197 fp->index = i;
34f80b04
EG
5198 fp->cl_id = BP_L_ID(bp) + i;
5199 fp->sb_id = fp->cl_id;
5200 DP(NETIF_MSG_IFUP,
f5372251
EG
5201 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5202 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5203 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5204 fp->sb_id);
5c862848 5205 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5206 }
5207
16119785
EG
5208 /* ensure status block indices were read */
5209 rmb();
5210
5211
5c862848
EG
5212 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5213 DEF_SB_ID);
5214 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5215 bnx2x_update_coalesce(bp);
5216 bnx2x_init_rx_rings(bp);
5217 bnx2x_init_tx_ring(bp);
5218 bnx2x_init_sp_ring(bp);
5219 bnx2x_init_context(bp);
471de716 5220 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5221 bnx2x_init_ind_table(bp);
0ef00459
EG
5222 bnx2x_stats_init(bp);
5223
5224 /* At this point, we are ready for interrupts */
5225 atomic_set(&bp->intr_sem, 0);
5226
5227 /* flush all before enabling interrupts */
5228 mb();
5229 mmiowb();
5230
615f8fd9 5231 bnx2x_int_enable(bp);
eb8da205
EG
5232
5233 /* Check for SPIO5 */
5234 bnx2x_attn_int_deasserted0(bp,
5235 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5236 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
5237}
5238
5239/* end of nic init */
5240
5241/*
5242 * gzip service functions
5243 */
5244
5245static int bnx2x_gunzip_init(struct bnx2x *bp)
5246{
5247 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5248 &bp->gunzip_mapping);
5249 if (bp->gunzip_buf == NULL)
5250 goto gunzip_nomem1;
5251
5252 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5253 if (bp->strm == NULL)
5254 goto gunzip_nomem2;
5255
5256 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5257 GFP_KERNEL);
5258 if (bp->strm->workspace == NULL)
5259 goto gunzip_nomem3;
5260
5261 return 0;
5262
5263gunzip_nomem3:
5264 kfree(bp->strm);
5265 bp->strm = NULL;
5266
5267gunzip_nomem2:
5268 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5269 bp->gunzip_mapping);
5270 bp->gunzip_buf = NULL;
5271
5272gunzip_nomem1:
5273 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5274 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
5275 return -ENOMEM;
5276}
5277
5278static void bnx2x_gunzip_end(struct bnx2x *bp)
5279{
5280 kfree(bp->strm->workspace);
5281
5282 kfree(bp->strm);
5283 bp->strm = NULL;
5284
5285 if (bp->gunzip_buf) {
5286 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5287 bp->gunzip_mapping);
5288 bp->gunzip_buf = NULL;
5289 }
5290}
5291
94a78b79 5292static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
5293{
5294 int n, rc;
5295
5296 /* check gzip header */
94a78b79
VZ
5297 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5298 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 5299 return -EINVAL;
94a78b79 5300 }
a2fbb9ea
ET
5301
5302 n = 10;
5303
34f80b04 5304#define FNAME 0x8
a2fbb9ea
ET
5305
5306 if (zbuf[3] & FNAME)
5307 while ((n < len) && (zbuf[n++] != 0)); /* bounds check first */
5308
94a78b79 5309 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
5310 bp->strm->avail_in = len - n;
5311 bp->strm->next_out = bp->gunzip_buf;
5312 bp->strm->avail_out = FW_BUF_SIZE;
5313
5314 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5315 if (rc != Z_OK)
5316 return rc;
5317
5318 rc = zlib_inflate(bp->strm, Z_FINISH);
5319 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5320 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5321 bp->dev->name, bp->strm->msg);
5322
5323 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5324 if (bp->gunzip_outlen & 0x3)
5325 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5326 " gunzip_outlen (%d) not aligned\n",
5327 bp->dev->name, bp->gunzip_outlen);
5328 bp->gunzip_outlen >>= 2;
5329
5330 zlib_inflateEnd(bp->strm);
5331
5332 if (rc == Z_STREAM_END)
5333 return 0;
5334
5335 return rc;
5336}
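/*
 * Editor's sketch (illustrative only, not part of the driver): the header
 * handling above.  A gzip stream starts with the magic 0x1f 0x8b and the
 * compression method byte; when the FNAME flag (bit 3 of the flags byte)
 * is set, a NUL-terminated file name follows the fixed 10-byte header.
 * This model mirrors the bounds-first skip used above:
 */
#if 0 /* example only */
static int gzip_payload_offset(const u8 *zbuf, int len)
{
	int n = 10;

	if (len < n || zbuf[0] != 0x1f || zbuf[1] != 0x8b)
		return -1;		/* not a gzip header */
	if (zbuf[3] & 0x08)		/* FNAME */
		while (n < len && zbuf[n++] != 0)
			;
	return n;			/* offset of the deflate payload */
}
#endif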
5337
5338/* nic load/unload */
5339
5340/*
34f80b04 5341 * General service functions
a2fbb9ea
ET
5342 */
5343
5344/* send a NIG loopback debug packet */
5345static void bnx2x_lb_pckt(struct bnx2x *bp)
5346{
a2fbb9ea 5347 u32 wb_write[3];
a2fbb9ea
ET
5348
5349 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5350 wb_write[0] = 0x55555555;
5351 wb_write[1] = 0x55555555;
34f80b04 5352 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5353 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5354
5355 /* NON-IP protocol */
a2fbb9ea
ET
5356 wb_write[0] = 0x09000000;
5357 wb_write[1] = 0x55555555;
34f80b04 5358 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5359 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5360}
5361
5362/* Some of the internal memories are not directly readable
5363 * from the driver;
5364 * to test them we send debug packets.
5365 */
5366static int bnx2x_int_mem_test(struct bnx2x *bp)
5367{
5368 int factor;
5369 int count, i;
5370 u32 val = 0;
5371
ad8d3948 5372 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5373 factor = 120;
ad8d3948
EG
5374 else if (CHIP_REV_IS_EMUL(bp))
5375 factor = 200;
5376 else
a2fbb9ea 5377 factor = 1;
a2fbb9ea
ET
5378
5379 DP(NETIF_MSG_HW, "start part1\n");
5380
5381 /* Disable inputs of parser neighbor blocks */
5382 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5383 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5384 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5385 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5386
5387 /* Write 0 to parser credits for CFC search request */
5388 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5389
5390 /* send Ethernet packet */
5391 bnx2x_lb_pckt(bp);
5392
5393 /* TODO: do we need to reset the NIG statistics? */
5394 /* Wait until NIG register shows 1 packet of size 0x10 */
5395 count = 1000 * factor;
5396 while (count) {
34f80b04 5397
a2fbb9ea
ET
5398 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5399 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5400 if (val == 0x10)
5401 break;
5402
5403 msleep(10);
5404 count--;
5405 }
5406 if (val != 0x10) {
5407 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5408 return -1;
5409 }
5410
5411 /* Wait until PRS register shows 1 packet */
5412 count = 1000 * factor;
5413 while (count) {
5414 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5415 if (val == 1)
5416 break;
5417
5418 msleep(10);
5419 count--;
5420 }
5421 if (val != 0x1) {
5422 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5423 return -2;
5424 }
5425
5426 /* Reset and init BRB, PRS */
34f80b04 5427 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5428 msleep(50);
34f80b04 5429 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5430 msleep(50);
94a78b79
VZ
5431 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5432 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5433
5434 DP(NETIF_MSG_HW, "part2\n");
5435
5436 /* Disable inputs of parser neighbor blocks */
5437 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5438 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5439 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5440 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5441
5442 /* Write 0 to parser credits for CFC search request */
5443 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5444
5445 /* send 10 Ethernet packets */
5446 for (i = 0; i < 10; i++)
5447 bnx2x_lb_pckt(bp);
5448
5449 /* Wait until NIG register shows 10 + 1
5450 packets of size 11*0x10 = 0xb0 */
5451 count = 1000 * factor;
5452 while (count) {
34f80b04 5453
a2fbb9ea
ET
5454 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5455 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5456 if (val == 0xb0)
5457 break;
5458
5459 msleep(10);
5460 count--;
5461 }
5462 if (val != 0xb0) {
5463 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5464 return -3;
5465 }
5466
5467 /* Wait until PRS register shows 2 packets */
5468 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5469 if (val != 2)
5470 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5471
5472 /* Write 1 to parser credits for CFC search request */
5473 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5474
5475 /* Wait until PRS register shows 3 packets */
5476 msleep(10 * factor);
5477 /* Wait until NIG register shows 1 packet of size 0x10 */
5478 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5479 if (val != 3)
5480 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5481
5482 /* clear NIG EOP FIFO */
5483 for (i = 0; i < 11; i++)
5484 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5485 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5486 if (val != 1) {
5487 BNX2X_ERR("clear of NIG failed\n");
5488 return -4;
5489 }
5490
5491 /* Reset and init BRB, PRS, NIG */
5492 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5493 msleep(50);
5494 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5495 msleep(50);
94a78b79
VZ
5496 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5497 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5498#ifndef BCM_ISCSI
5499 /* set NIC mode */
5500 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5501#endif
5502
5503 /* Enable inputs of parser neighbor blocks */
5504 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5505 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5506 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5507 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5508
5509 DP(NETIF_MSG_HW, "done\n");
5510
5511 return 0; /* OK */
5512}
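/*
 * Editor's sketch (illustrative only, not part of the driver): every wait
 * in the self-test above follows one pattern: poll a statistics register
 * until it reaches the expected value, scaling the iteration budget by a
 * platform factor because FPGA and emulation targets run far slower than
 * real silicon.  rd32() is a hypothetical register-read accessor:
 */
#if 0 /* example only */
static int poll_reg_eq(u32 (*rd32)(u32 reg), u32 reg, u32 expect, int factor)
{
	int count = 1000 * factor;

	while (count--) {
		if (rd32(reg) == expect)
			return 0;
		/* the driver sleeps 10ms between polls (msleep(10)) */
	}
	return -1;			/* timed out */
}
#endif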
5513
5514static void enable_blocks_attention(struct bnx2x *bp)
5515{
5516 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5517 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5518 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5519 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5520 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5521 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5522 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5523 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5524 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5525/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5526/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5527 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5528 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5529 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5530/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5531/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5532 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5533 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5534 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5535 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5536/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5537/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5538 if (CHIP_REV_IS_FPGA(bp))
5539 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5540 else
5541 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5542 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5543 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5544 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5545/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5546/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5547 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5548 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5549/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5550 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5551}
5552
34f80b04 5553
81f75bbf
EG
5554static void bnx2x_reset_common(struct bnx2x *bp)
5555{
5556 /* reset_common */
5557 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5558 0xd3ffff7f);
5559 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5560}
5561
fd4ef40d
EG
5562
5563static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5564{
5565 u32 val;
5566 u8 port;
5567 u8 is_required = 0;
5568
5569 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5570 SHARED_HW_CFG_FAN_FAILURE_MASK;
5571
5572 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5573 is_required = 1;
5574
5575 /*
5576 * The fan failure mechanism is usually related to the PHY type since
5577 * the power consumption of the board is affected by the PHY. Currently,
5578 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5579 */
5580 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5581 for (port = PORT_0; port < PORT_MAX; port++) {
5582 u32 phy_type =
5583 SHMEM_RD(bp, dev_info.port_hw_config[port].
5584 external_phy_config) &
5585 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5586 is_required |=
5587 ((phy_type ==
5588 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
5589 (phy_type ==
5590 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
5591 (phy_type ==
5592 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5593 }
5594
5595 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5596
5597 if (is_required == 0)
5598 return;
5599
5600 /* Fan failure is indicated by SPIO 5 */
5601 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5602 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5603
5604 /* set to active low mode */
5605 val = REG_RD(bp, MISC_REG_SPIO_INT);
5606 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5607 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5608 REG_WR(bp, MISC_REG_SPIO_INT, val);
5609
5610 /* enable interrupt to signal the IGU */
5611 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5612 val |= (1 << MISC_REGISTERS_SPIO_5);
5613 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5614}
5615
34f80b04 5616static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5617{
a2fbb9ea 5618 u32 val, i;
a2fbb9ea 5619
34f80b04 5620 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5621
81f75bbf 5622 bnx2x_reset_common(bp);
34f80b04
EG
5623 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5624 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5625
94a78b79 5626 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
5627 if (CHIP_IS_E1H(bp))
5628 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5629
34f80b04
EG
5630 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5631 msleep(30);
5632 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5633
94a78b79 5634 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
5635 if (CHIP_IS_E1(bp)) {
5636 /* enable HW interrupt from PXP on USDM overflow
5637 bit 16 on INT_MASK_0 */
5638 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5639 }
a2fbb9ea 5640
94a78b79 5641 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 5642 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5643
5644#ifdef __BIG_ENDIAN
34f80b04
EG
5645 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5646 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5647 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5648 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5649 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
5650 /* make sure this value is 0 */
5651 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
5652
5653/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5654 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5655 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5656 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5657 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5658#endif
5659
34f80b04 5660 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5661#ifdef BCM_ISCSI
34f80b04
EG
5662 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5663 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5664 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5665#endif
5666
34f80b04
EG
5667 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5668 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5669
34f80b04
EG
5670 /* let the HW do its magic ... */
5671 msleep(100);
5672 /* finish PXP init */
5673 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5674 if (val != 1) {
5675 BNX2X_ERR("PXP2 CFG failed\n");
5676 return -EBUSY;
5677 }
5678 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5679 if (val != 1) {
5680 BNX2X_ERR("PXP2 RD_INIT failed\n");
5681 return -EBUSY;
5682 }
a2fbb9ea 5683
34f80b04
EG
5684 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5685 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5686
94a78b79 5687 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 5688
34f80b04
EG
5689 /* clean the DMAE memory */
5690 bp->dmae_ready = 1;
5691 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5692
94a78b79
VZ
5693 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5694 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5695 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5696 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 5697
34f80b04
EG
5698 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5699 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5700 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5701 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5702
94a78b79 5703 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
34f80b04
EG
5704 /* soft reset pulse */
5705 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5706 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5707
5708#ifdef BCM_ISCSI
94a78b79 5709 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 5710#endif
a2fbb9ea 5711
94a78b79 5712 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
5713 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5714 if (!CHIP_REV_IS_SLOW(bp)) {
5715 /* enable hw interrupt from doorbell Q */
5716 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5717 }
a2fbb9ea 5718
94a78b79
VZ
5719 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5720 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 5721 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
5722 /* set NIC mode */
5723 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
5724 if (CHIP_IS_E1H(bp))
5725 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5726
94a78b79
VZ
5727 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5728 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5729 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5730 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 5731
490c3c9b
EG
5732 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5733 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5734 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5735 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 5736
94a78b79
VZ
5737 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5738 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5739 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5740 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 5741
34f80b04
EG
5742 /* sync semi rtc */
5743 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5744 0x80000000);
5745 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5746 0x80000000);
a2fbb9ea 5747
94a78b79
VZ
5748 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5749 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5750 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 5751
34f80b04
EG
5752 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5753 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5754 REG_WR(bp, i, 0xc0cac01a);
5755 /* TODO: replace with something meaningful */
5756 }
94a78b79 5757 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
34f80b04 5758 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5759
34f80b04
EG
5760 if (sizeof(union cdu_context) != 1024)
5761 /* we currently assume that a context is 1024 bytes */
5762 printk(KERN_ALERT PFX "please adjust the size of"
5763 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5764
94a78b79 5765 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
5766 val = (4 << 24) + (0 << 12) + 1024;
5767 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5768 if (CHIP_IS_E1(bp)) {
5769 /* !!! fix pxp client credit until excel update */
5770 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5771 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5772 }
a2fbb9ea 5773
94a78b79 5774 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 5775 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5776 /* enable context validation interrupt from CFC */
5777 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5778
5779 /* set the thresholds to prevent CFC/CDU race */
5780 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5781
94a78b79
VZ
5782 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5783 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 5784
34f80b04 5785 /* PXPCS COMMON comes here */
94a78b79 5786 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
5787 /* Reset PCIE errors for debug */
5788 REG_WR(bp, 0x2814, 0xffffffff);
5789 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5790
34f80b04 5791 /* EMAC0 COMMON comes here */
94a78b79 5792 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
34f80b04 5793 /* EMAC1 COMMON comes here */
94a78b79 5794 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
34f80b04 5795 /* DBU COMMON comes here */
94a78b79 5796 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
34f80b04 5797 /* DBG COMMON comes here */
94a78b79 5798 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 5799
94a78b79 5800 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
5801 if (CHIP_IS_E1H(bp)) {
5802 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5803 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5804 }
5805
5806 if (CHIP_REV_IS_SLOW(bp))
5807 msleep(200);
5808
5809 /* finish CFC init */
5810 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5811 if (val != 1) {
5812 BNX2X_ERR("CFC LL_INIT failed\n");
5813 return -EBUSY;
5814 }
5815 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5816 if (val != 1) {
5817 BNX2X_ERR("CFC AC_INIT failed\n");
5818 return -EBUSY;
5819 }
5820 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5821 if (val != 1) {
5822 BNX2X_ERR("CFC CAM_INIT failed\n");
5823 return -EBUSY;
5824 }
5825 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5826
34f80b04
EG
5827 /* read NIG statistic
5828 to see if this is our first time up since power-up */
5829 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5830 val = *bnx2x_sp(bp, wb_data[0]);
5831
5832 /* do internal memory self test */
5833 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5834 BNX2X_ERR("internal mem self test failed\n");
5835 return -EBUSY;
5836 }
5837
35b19ba5 5838 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
5839 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5840 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5841 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 5842 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
5843 bp->port.need_hw_lock = 1;
5844 break;
5845
34f80b04
EG
5846 default:
5847 break;
5848 }
f1410647 5849
fd4ef40d
EG
5850 bnx2x_setup_fan_failure_detection(bp);
5851
34f80b04
EG
5852 /* clear PXP2 attentions */
5853 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5854
34f80b04 5855 enable_blocks_attention(bp);
a2fbb9ea 5856
6bbca910
YR
5857 if (!BP_NOMCP(bp)) {
5858 bnx2x_acquire_phy_lock(bp);
5859 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5860 bnx2x_release_phy_lock(bp);
5861 } else
5862 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5863
34f80b04
EG
5864 return 0;
5865}
a2fbb9ea 5866
34f80b04
EG
5867static int bnx2x_init_port(struct bnx2x *bp)
5868{
5869 int port = BP_PORT(bp);
94a78b79 5870 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 5871 u32 low, high;
34f80b04 5872 u32 val;
a2fbb9ea 5873
34f80b04
EG
5874 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5875
5876 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5877
5878 /* Port PXP comes here */
94a78b79 5879 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
a2fbb9ea 5880 /* Port PXP2 comes here */
94a78b79 5881 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
a2fbb9ea
ET
5882#ifdef BCM_ISCSI
5883 /* Port0 1
5884 * Port1 385 */
5885 i++;
5886 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5887 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5888 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5889 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5890
5891 /* Port0 2
5892 * Port1 386 */
5893 i++;
5894 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5895 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5896 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5897 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5898
5899 /* Port0 3
5900 * Port1 387 */
5901 i++;
5902 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5903 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5904 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5905 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5906#endif
34f80b04 5907 /* Port CMs come here */
94a78b79 5908 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea
ET
5909
5910 /* Port QM comes here */
a2fbb9ea
ET
5911#ifdef BCM_ISCSI
5912 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5913 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5914
94a78b79 5915 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
a2fbb9ea
ET
5916#endif
5917 /* Port DQ comes here */
94a78b79 5918 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 5919
94a78b79 5920 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
5921 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5922 /* no pause for emulation and FPGA */
5923 low = 0;
5924 high = 513;
5925 } else {
5926 if (IS_E1HMF(bp))
5927 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5928 else if (bp->dev->mtu > 4096) {
5929 if (bp->flags & ONE_PORT_FLAG)
5930 low = 160;
5931 else {
5932 val = bp->dev->mtu;
5933 /* (24*1024 + val*4)/256 */
5934 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5935 }
5936 } else
5937 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5938 high = low + 56; /* 14*1024/256 */
5939 }
5940 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5941 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5942
5943
ad8d3948 5944 /* Port PRS comes here */
94a78b79 5945 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
a2fbb9ea 5946 /* Port TSDM comes here */
94a78b79 5947 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
a2fbb9ea 5948 /* Port CSDM comes here */
94a78b79 5949 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
a2fbb9ea 5950 /* Port USDM comes here */
94a78b79 5951 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
a2fbb9ea 5952 /* Port XSDM comes here */
94a78b79 5953 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 5954
94a78b79
VZ
5955 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5956 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5957 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5958 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 5959
a2fbb9ea 5960 /* Port UPB comes here */
94a78b79 5961 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
34f80b04 5962 /* Port XPB comes here */
94a78b79 5963 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 5964
94a78b79 5965 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
5966
5967 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5968 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5969
5970 /* update threshold */
34f80b04 5971 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5972 /* update init credit */
34f80b04 5973 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
5974
5975 /* probe changes */
34f80b04 5976 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5977 msleep(5);
34f80b04 5978 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5979
5980#ifdef BCM_ISCSI
5981 /* tell the searcher where the T2 table is */
5982 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5983
5984 wb_write[0] = U64_LO(bp->t2_mapping);
5985 wb_write[1] = U64_HI(bp->t2_mapping);
5986 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5987 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5988 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5989 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5990
5991 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5992 /* Port SRCH comes here */
5993#endif
5994 /* Port CDU comes here */
94a78b79 5995 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
a2fbb9ea 5996 /* Port CFC comes here */
94a78b79 5997 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
5998
5999 if (CHIP_IS_E1(bp)) {
6000 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6001 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6002 }
94a78b79 6003 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6004
94a78b79 6005 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6006 /* init aeu_mask_attn_func_0/1:
6007 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6008 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6009 * bits 4-7 are used for "per vn group attention" */
6010 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6011 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6012
a2fbb9ea 6013 /* Port PXPCS comes here */
94a78b79 6014 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
a2fbb9ea 6015 /* Port EMAC0 comes here */
94a78b79 6016 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
a2fbb9ea 6017 /* Port EMAC1 comes here */
94a78b79 6018 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
a2fbb9ea 6019 /* Port DBU comes here */
94a78b79 6020 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
a2fbb9ea 6021 /* Port DBG comes here */
94a78b79 6022 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6023
94a78b79 6024 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6025
6026 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6027
6028 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6029 /* 0x2 disable e1hov, 0x1 enable */
6030 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6031 (IS_E1HMF(bp) ? 0x1 : 0x2));
6032
1c06328c
EG
6033 /* support pause requests from USDM, TSDM and BRB */
6034 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6035
6036 {
6037 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6038 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6039 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6040 }
34f80b04
EG
6041 }
6042
a2fbb9ea 6043 /* Port MCP comes here */
94a78b79 6044 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
a2fbb9ea 6045 /* Port DMAE comes here */
94a78b79 6046 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6047
35b19ba5 6048 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6049 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6050 {
6051 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6052
6053 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6054 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6055
6056 /* The GPIO should be swapped if the swap register is
6057 set and active */
6058 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6059 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6060
6061 /* Select function upon port-swap configuration */
6062 if (port == 0) {
6063 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6064 aeu_gpio_mask = (swap_val && swap_override) ?
6065 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6066 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6067 } else {
6068 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6069 aeu_gpio_mask = (swap_val && swap_override) ?
6070 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6071 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6072 }
6073 val = REG_RD(bp, offset);
6074 /* add GPIO3 to group */
6075 val |= aeu_gpio_mask;
6076 REG_WR(bp, offset, val);
6077 }
6078 break;
6079
35b19ba5 6080 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6081 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6082 /* add SPIO 5 to group 0 */
4d295db0
EG
6083 {
6084 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6085 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6086 val = REG_RD(bp, reg_addr);
f1410647 6087 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6088 REG_WR(bp, reg_addr, val);
6089 }
f1410647
ET
6090 break;
6091
6092 default:
6093 break;
6094 }
6095
c18487ee 6096 bnx2x__link_reset(bp);
a2fbb9ea 6097
34f80b04
EG
6098 return 0;
6099}
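/*
 * Editor's sketch (illustrative only, not part of the driver): the BRB
 * pause thresholds above are programmed in 256-byte units.  For MTU >
 * 4096 on a two-port design, low = (24KiB + 4*MTU)/256 rounded up, which
 * the driver writes as 96 + ceil(MTU/64), and high always sits 14KiB
 * (56 units) above low:
 */
#if 0 /* example only */
static void brb_pause_thresholds(u32 mtu, u32 *low, u32 *high)
{
	*low = 96 + mtu / 64 + (mtu % 64 ? 1 : 0);
	*high = *low + 56;
	/* e.g. MTU 9000: low = 96 + 141 = 237, high = 293 */
}
#endif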
6100
6101#define ILT_PER_FUNC (768/2)
6102#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6103/* the phys address is shifted right 12 bits and a 1 (valid bit)
6104 is added at the 53rd bit;
6105 then, since this is a wide register(TM),
6106 we split it into two 32-bit writes
6107 */
6108#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6109#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6110#define PXP_ONE_ILT(x) (((x) << 10) | x)
6111#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
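/*
 * Editor's sketch (illustrative only, not part of the driver): a worked
 * example of the ONCHIP_ADDR macros above.  For the page-aligned physical
 * address 0x0000001234567000, the low write is addr >> 12 = 0x01234567
 * and the high write is (1 << 20) | (addr >> 44) = 0x00100000, which puts
 * the valid bit at the 53rd bit (index 52) of the combined wide register:
 */
#if 0 /* example only */
static void onchip_addr_split(u64 addr, u32 *lo, u32 *hi)
{
	*lo = (u32)(addr >> 12);		/* ONCHIP_ADDR1 */
	*hi = (u32)((1 << 20) | (addr >> 44));	/* ONCHIP_ADDR2 */
}
#endif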
6112
6113#define CNIC_ILT_LINES 0
6114
6115static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6116{
6117 int reg;
6118
6119 if (CHIP_IS_E1H(bp))
6120 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6121 else /* E1 */
6122 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6123
6124 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6125}
6126
6127static int bnx2x_init_func(struct bnx2x *bp)
6128{
6129 int port = BP_PORT(bp);
6130 int func = BP_FUNC(bp);
8badd27a 6131 u32 addr, val;
34f80b04
EG
6132 int i;
6133
6134 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6135
8badd27a
EG
6136 /* set MSI reconfigure capability */
6137 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6138 val = REG_RD(bp, addr);
6139 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6140 REG_WR(bp, addr, val);
6141
34f80b04
EG
6142 i = FUNC_ILT_BASE(func);
6143
6144 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6145 if (CHIP_IS_E1H(bp)) {
6146 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6147 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6148 } else /* E1 */
6149 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6150 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6151
6152
6153 if (CHIP_IS_E1H(bp)) {
6154 for (i = 0; i < 9; i++)
6155 bnx2x_init_block(bp,
94a78b79 6156 cm_blocks[i], FUNC0_STAGE + func);
34f80b04
EG
6157
6158 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6159 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6160 }
6161
6162 /* HC init per function */
6163 if (CHIP_IS_E1H(bp)) {
6164 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6165
6166 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6167 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6168 }
94a78b79 6169 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 6170
c14423fe 6171 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6172 REG_WR(bp, 0x2114, 0xffffffff);
6173 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6174
34f80b04
EG
6175 return 0;
6176}
6177
6178static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6179{
6180 int i, rc = 0;
a2fbb9ea 6181
34f80b04
EG
6182 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6183 BP_FUNC(bp), load_code);
a2fbb9ea 6184
34f80b04
EG
6185 bp->dmae_ready = 0;
6186 mutex_init(&bp->dmae_mutex);
6187 bnx2x_gunzip_init(bp);
a2fbb9ea 6188
34f80b04
EG
6189 switch (load_code) {
6190 case FW_MSG_CODE_DRV_LOAD_COMMON:
6191 rc = bnx2x_init_common(bp);
6192 if (rc)
6193 goto init_hw_err;
6194 /* no break */
6195
6196 case FW_MSG_CODE_DRV_LOAD_PORT:
6197 bp->dmae_ready = 1;
6198 rc = bnx2x_init_port(bp);
6199 if (rc)
6200 goto init_hw_err;
6201 /* no break */
6202
6203 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6204 bp->dmae_ready = 1;
6205 rc = bnx2x_init_func(bp);
6206 if (rc)
6207 goto init_hw_err;
6208 break;
6209
6210 default:
6211 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6212 break;
6213 }
6214
6215 if (!BP_NOMCP(bp)) {
6216 int func = BP_FUNC(bp);
a2fbb9ea
ET
6217
6218 bp->fw_drv_pulse_wr_seq =
34f80b04 6219 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6220 DRV_PULSE_SEQ_MASK);
34f80b04
EG
6221 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6222 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6223 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6224 } else
6225 bp->func_stx = 0;
a2fbb9ea 6226
34f80b04
EG
6227 /* this needs to be done before gunzip end */
6228 bnx2x_zero_def_sb(bp);
6229 for_each_queue(bp, i)
6230 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6231
6232init_hw_err:
6233 bnx2x_gunzip_end(bp);
6234
6235 return rc;
a2fbb9ea
ET
6236}
6237
c14423fe 6238/* send the MCP a request, block until there is a reply */
4d295db0 6239u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
a2fbb9ea 6240{
34f80b04 6241 int func = BP_FUNC(bp);
f1410647
ET
6242 u32 seq = ++bp->fw_seq;
6243 u32 rc = 0;
19680c48
EG
6244 u32 cnt = 1;
6245 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6246
34f80b04 6247 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6248 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6249
19680c48
EG
6250 do {
6251 /* let the FW do its magic ... */
6252 msleep(delay);
a2fbb9ea 6253
19680c48 6254 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6255
19680c48
EG
6256 /* Give the FW up to 2 seconds (200*10ms) */
6257 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6258
6259 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6260 cnt*delay, rc, seq);
a2fbb9ea
ET
6261
6262 /* is this a reply to our command? */
6263 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6264 rc &= FW_MSG_CODE_MASK;
f1410647 6265
a2fbb9ea
ET
6266 } else {
6267 /* FW BUG! */
6268 BNX2X_ERR("FW failed to respond!\n");
6269 bnx2x_fw_dump(bp);
6270 rc = 0;
6271 }
f1410647 6272
a2fbb9ea
ET
6273 return rc;
6274}
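/*
 * Editor's sketch (illustrative only, not part of the driver): the
 * mailbox handshake above.  The driver posts command|seq, then polls the
 * firmware mailbox until the low bits (FW_MSG_SEQ_NUMBER_MASK) echo the
 * same sequence number; only then do the remaining bits hold a valid
 * reply code.  The masks here are parameters, not the real constants:
 */
#if 0 /* example only */
static u32 mcp_reply_code(u32 seq, u32 mb_word, u32 seq_mask, u32 code_mask)
{
	if ((mb_word & seq_mask) != seq)
		return 0;		/* stale or foreign reply */
	return mb_word & code_mask;	/* e.g. a FW_MSG_CODE_* value */
}
#endif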
6275
6276static void bnx2x_free_mem(struct bnx2x *bp)
6277{
6278
6279#define BNX2X_PCI_FREE(x, y, size) \
6280 do { \
6281 if (x) { \
6282 pci_free_consistent(bp->pdev, size, x, y); \
6283 x = NULL; \
6284 y = 0; \
6285 } \
6286 } while (0)
6287
6288#define BNX2X_FREE(x) \
6289 do { \
6290 if (x) { \
6291 vfree(x); \
6292 x = NULL; \
6293 } \
6294 } while (0)
6295
6296 int i;
6297
6298 /* fastpath */
555f6c78 6299 /* Common */
a2fbb9ea
ET
6300 for_each_queue(bp, i) {
6301
555f6c78 6302 /* status blocks */
a2fbb9ea
ET
6303 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6304 bnx2x_fp(bp, i, status_blk_mapping),
6305 sizeof(struct host_status_block) +
6306 sizeof(struct eth_tx_db_data));
555f6c78
EG
6307 }
6308 /* Rx */
6309 for_each_rx_queue(bp, i) {
a2fbb9ea 6310
555f6c78 6311 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6312 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6313 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6314 bnx2x_fp(bp, i, rx_desc_mapping),
6315 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6316
6317 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6318 bnx2x_fp(bp, i, rx_comp_mapping),
6319 sizeof(struct eth_fast_path_rx_cqe) *
6320 NUM_RCQ_BD);
a2fbb9ea 6321
7a9b2557 6322 /* SGE ring */
32626230 6323 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6324 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6325 bnx2x_fp(bp, i, rx_sge_mapping),
6326 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6327 }
555f6c78
EG
6328 /* Tx */
6329 for_each_tx_queue(bp, i) {
6330
6331 /* fastpath tx rings: tx_buf tx_desc */
6332 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6333 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6334 bnx2x_fp(bp, i, tx_desc_mapping),
6335 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6336 }
a2fbb9ea
ET
6337 /* end of fastpath */
6338
6339 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6340 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6341
6342 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6343 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
6344
6345#ifdef BCM_ISCSI
6346 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6347 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6348 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6349 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6350#endif
7a9b2557 6351 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6352
6353#undef BNX2X_PCI_FREE
6354#undef BNX2X_FREE
6355}
6356
6357static int bnx2x_alloc_mem(struct bnx2x *bp)
6358{
6359
6360#define BNX2X_PCI_ALLOC(x, y, size) \
6361 do { \
6362 x = pci_alloc_consistent(bp->pdev, size, y); \
6363 if (x == NULL) \
6364 goto alloc_mem_err; \
6365 memset(x, 0, size); \
6366 } while (0)
6367
6368#define BNX2X_ALLOC(x, size) \
6369 do { \
6370 x = vmalloc(size); \
6371 if (x == NULL) \
6372 goto alloc_mem_err; \
6373 memset(x, 0, size); \
6374 } while (0)
6375
6376 int i;
6377
6378 /* fastpath */
555f6c78 6379 /* Common */
a2fbb9ea
ET
6380 for_each_queue(bp, i) {
6381 bnx2x_fp(bp, i, bp) = bp;
6382
555f6c78 6383 /* status blocks */
a2fbb9ea
ET
6384 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6385 &bnx2x_fp(bp, i, status_blk_mapping),
6386 sizeof(struct host_status_block) +
6387 sizeof(struct eth_tx_db_data));
555f6c78
EG
6388 }
6389 /* Rx */
6390 for_each_rx_queue(bp, i) {
a2fbb9ea 6391
555f6c78 6392 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6393 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6394 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6395 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6396 &bnx2x_fp(bp, i, rx_desc_mapping),
6397 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6398
6399 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6400 &bnx2x_fp(bp, i, rx_comp_mapping),
6401 sizeof(struct eth_fast_path_rx_cqe) *
6402 NUM_RCQ_BD);
6403
7a9b2557
VZ
6404 /* SGE ring */
6405 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6406 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6407 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6408 &bnx2x_fp(bp, i, rx_sge_mapping),
6409 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6410 }
555f6c78
EG
6411 /* Tx */
6412 for_each_tx_queue(bp, i) {
6413
6414 bnx2x_fp(bp, i, hw_tx_prods) =
6415 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6416
6417 bnx2x_fp(bp, i, tx_prods_mapping) =
6418 bnx2x_fp(bp, i, status_blk_mapping) +
6419 sizeof(struct host_status_block);
6420
6421 /* fastpath tx rings: tx_buf tx_desc */
6422 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6423 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6424 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6425 &bnx2x_fp(bp, i, tx_desc_mapping),
6426 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6427 }
a2fbb9ea
ET
6428 /* end of fastpath */
6429
6430 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6431 sizeof(struct host_def_status_block));
6432
6433 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6434 sizeof(struct bnx2x_slowpath));
6435
6436#ifdef BCM_ISCSI
6437 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6438
6439 /* Initialize T1 */
6440 for (i = 0; i < 64*1024; i += 64) {
6441 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6442 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6443 }
6444
6445 /* allocate searcher T2 table
6446 we allocate 1/4 of alloc num for T2
6447 (which is not entered into the ILT) */
6448 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6449
6450 /* Initialize T2 */
6451 for (i = 0; i < 16*1024; i += 64)
6452 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6453
c14423fe 6454 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
6455 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6456
6457 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6458 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6459
6460 /* QM queues (128*MAX_CONN) */
6461 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6462#endif
6463
6464 /* Slow path ring */
6465 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6466
6467 return 0;
6468
6469alloc_mem_err:
6470 bnx2x_free_mem(bp);
6471 return -ENOMEM;
6472
6473#undef BNX2X_PCI_ALLOC
6474#undef BNX2X_ALLOC
6475}
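/*
 * Editor's sketch (illustrative only, not part of the driver): the
 * allocation scheme above is the classic all-or-nothing pattern: every
 * BNX2X_*_ALLOC jumps to a single error label on failure, and the free
 * routine checks each pointer for NULL so a partially built state unwinds
 * safely.  Shown here with plain calloc()/free() (assumes <stdlib.h>):
 */
#if 0 /* example only */
static int alloc_all_or_nothing(void **a, size_t na, void **b, size_t nb)
{
	*a = *b = NULL;

	*a = calloc(1, na);
	if (*a == NULL)
		goto err;
	*b = calloc(1, nb);
	if (*b == NULL)
		goto err;
	return 0;

err:
	free(*b);			/* free(NULL) is a no-op */
	free(*a);
	*a = *b = NULL;
	return -1;			/* -ENOMEM in the driver */
}
#endif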
6476
6477static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6478{
6479 int i;
6480
555f6c78 6481 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6482 struct bnx2x_fastpath *fp = &bp->fp[i];
6483
6484 u16 bd_cons = fp->tx_bd_cons;
6485 u16 sw_prod = fp->tx_pkt_prod;
6486 u16 sw_cons = fp->tx_pkt_cons;
6487
a2fbb9ea
ET
6488 while (sw_cons != sw_prod) {
6489 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6490 sw_cons++;
6491 }
6492 }
6493}
6494
6495static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6496{
6497 int i, j;
6498
555f6c78 6499 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6500 struct bnx2x_fastpath *fp = &bp->fp[j];
6501
a2fbb9ea
ET
6502 for (i = 0; i < NUM_RX_BD; i++) {
6503 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6504 struct sk_buff *skb = rx_buf->skb;
6505
6506 if (skb == NULL)
6507 continue;
6508
6509 pci_unmap_single(bp->pdev,
6510 pci_unmap_addr(rx_buf, mapping),
356e2385 6511 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
6512
6513 rx_buf->skb = NULL;
6514 dev_kfree_skb(skb);
6515 }
7a9b2557 6516 if (!fp->disable_tpa)
32626230
EG
6517 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6518 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6519 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6520 }
6521}
6522
6523static void bnx2x_free_skbs(struct bnx2x *bp)
6524{
6525 bnx2x_free_tx_skbs(bp);
6526 bnx2x_free_rx_skbs(bp);
6527}
6528
6529static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6530{
34f80b04 6531 int i, offset = 1;
a2fbb9ea
ET
6532
6533 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6534 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6535 bp->msix_table[0].vector);
6536
6537 for_each_queue(bp, i) {
c14423fe 6538 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6539 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6540 bnx2x_fp(bp, i, state));
6541
34f80b04 6542 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6543 }
a2fbb9ea
ET
6544}
6545
6546static void bnx2x_free_irq(struct bnx2x *bp)
6547{
a2fbb9ea 6548 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6549 bnx2x_free_msix_irqs(bp);
6550 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6551 bp->flags &= ~USING_MSIX_FLAG;
6552
8badd27a
EG
6553 } else if (bp->flags & USING_MSI_FLAG) {
6554 free_irq(bp->pdev->irq, bp->dev);
6555 pci_disable_msi(bp->pdev);
6556 bp->flags &= ~USING_MSI_FLAG;
6557
a2fbb9ea
ET
6558 } else
6559 free_irq(bp->pdev->irq, bp->dev);
6560}
6561
6562static int bnx2x_enable_msix(struct bnx2x *bp)
6563{
8badd27a
EG
6564 int i, rc, offset = 1;
6565 int igu_vec = 0;
a2fbb9ea 6566
8badd27a
EG
6567 bp->msix_table[0].entry = igu_vec;
6568 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6569
34f80b04 6570 for_each_queue(bp, i) {
8badd27a 6571 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
6572 bp->msix_table[i + offset].entry = igu_vec;
6573 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6574 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6575 }
6576
34f80b04 6577 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6578 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6579 if (rc) {
8badd27a
EG
6580 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6581 return rc;
34f80b04 6582 }
8badd27a 6583
a2fbb9ea
ET
6584 bp->flags |= USING_MSIX_FLAG;
6585
6586 return 0;
a2fbb9ea
ET
6587}
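/*
 * Editor's sketch (illustrative only, not part of the driver): the MSI-X
 * table layout built above.  Slot 0 carries the slowpath interrupt and
 * the fastpath queues follow from offset 1, so a device with N queues
 * requests N + 1 vectors from the PCI layer:
 */
#if 0 /* example only */
static void fill_msix_entries(u16 *entry, int num_queues, u16 base_igu_vec)
{
	int i;

	entry[0] = 0;				/* slowpath */
	for (i = 0; i < num_queues; i++)	/* fastpath #0..N-1 */
		entry[1 + i] = base_igu_vec + 1 + i;
}
#endif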
6588
a2fbb9ea
ET
6589static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6590{
34f80b04 6591 int i, rc, offset = 1;
a2fbb9ea 6592
a2fbb9ea
ET
6593 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6594 bp->dev->name, bp->dev);
a2fbb9ea
ET
6595 if (rc) {
6596 BNX2X_ERR("request sp irq failed\n");
6597 return -EBUSY;
6598 }
6599
6600 for_each_queue(bp, i) {
555f6c78
EG
6601 struct bnx2x_fastpath *fp = &bp->fp[i];
6602
6603 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6604 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6605 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6606 if (rc) {
555f6c78 6607 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
6608 bnx2x_free_msix_irqs(bp);
6609 return -EBUSY;
6610 }
6611
555f6c78 6612 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6613 }
6614
555f6c78
EG
6615 i = BNX2X_NUM_QUEUES(bp);
6616 if (is_multi(bp))
6617 printk(KERN_INFO PFX
6618 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6619 bp->dev->name, bp->msix_table[0].vector,
6620 bp->msix_table[offset].vector,
6621 bp->msix_table[offset + i - 1].vector);
6622 else
6623 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6624 bp->dev->name, bp->msix_table[0].vector,
6625 bp->msix_table[offset + i - 1].vector);
6626
a2fbb9ea 6627 return 0;
a2fbb9ea
ET
6628}
6629
8badd27a
EG
6630static int bnx2x_enable_msi(struct bnx2x *bp)
6631{
6632 int rc;
6633
6634 rc = pci_enable_msi(bp->pdev);
6635 if (rc) {
6636 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6637 return -1;
6638 }
6639 bp->flags |= USING_MSI_FLAG;
6640
6641 return 0;
6642}
6643
a2fbb9ea
ET
6644static int bnx2x_req_irq(struct bnx2x *bp)
6645{
8badd27a 6646 unsigned long flags;
34f80b04 6647 int rc;
a2fbb9ea 6648
8badd27a
EG
6649 if (bp->flags & USING_MSI_FLAG)
6650 flags = 0;
6651 else
6652 flags = IRQF_SHARED;
6653
6654 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6655 bp->dev->name, bp->dev);
a2fbb9ea 6656 if (!rc)
6657 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6658
6659 return rc;
a2fbb9ea 6660 }
6661
65abd74d 6662 static void bnx2x_napi_enable(struct bnx2x *bp)
6663{
6664 int i;
6665
555f6c78 6666 for_each_rx_queue(bp, i)
65abd74d 6667 napi_enable(&bnx2x_fp(bp, i, napi));
6668}
6669
6670static void bnx2x_napi_disable(struct bnx2x *bp)
6671{
6672 int i;
6673
555f6c78 6674 for_each_rx_queue(bp, i)
65abd74d 6675 napi_disable(&bnx2x_fp(bp, i, napi));
6676}
6677
6678static void bnx2x_netif_start(struct bnx2x *bp)
6679{
e1510706 6680 int intr_sem;
6681
6682 intr_sem = atomic_dec_and_test(&bp->intr_sem);
6683 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6684
6685 if (intr_sem) {
65abd74d 6686 if (netif_running(bp->dev)) {
65abd74d 6687 bnx2x_napi_enable(bp);
6688 bnx2x_int_enable(bp);
555f6c78 6689 if (bp->state == BNX2X_STATE_OPEN)
6690 netif_tx_wake_all_queues(bp->dev);
65abd74d 6691 }
6692 }
6693}
6694
f8ef6e44 6695static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6696{
f8ef6e44 6697 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6698 bnx2x_napi_disable(bp);
762d5f6c 6699 netif_tx_disable(bp->dev);
6700 bp->dev->trans_start = jiffies; /* prevent tx timeout */
65abd74d 6701 }
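/*
 * Editorial note: bp->intr_sem effectively acts as a disable count for
 * this start/stop pair - bnx2x_int_disable_sync() (called from
 * bnx2x_netif_stop() above) is understood to increment it elsewhere in
 * this file, and bnx2x_netif_start() only re-enables NAPI and
 * interrupts when atomic_dec_and_test() sees the count reach zero.
 * Assuming that pairing, nested stop/start is safe:
 */
#if 0
	bnx2x_netif_stop(bp, 1);	/* intr_sem 0 -> 1 */
	bnx2x_netif_stop(bp, 1);	/* intr_sem 1 -> 2 */
	bnx2x_netif_start(bp);		/* 2 -> 1: still quiesced */
	bnx2x_netif_start(bp);		/* 1 -> 0: NAPI + IRQs back on */
#endif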
6702
a2fbb9ea 6703 /*
6704 * Init service functions
6705 */
6706
3101c2bc 6707 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
a2fbb9ea 6708 {
6709 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6710 int port = BP_PORT(bp);
a2fbb9ea 6711
6712 /* CAM allocation
6713 * unicasts 0-31:port0 32-63:port1
6714 * multicast 64-127:port0 128-191:port1
6715 */
8d9c5f34 6716 config->hdr.length = 2;
af246401 6717 config->hdr.offset = port ? 32 : 0;
0626b899 6718 config->hdr.client_id = bp->fp->cl_id;
a2fbb9ea 6719 config->hdr.reserved1 = 0;
6720
6721 /* primary MAC */
6722 config->config_table[0].cam_entry.msb_mac_addr =
6723 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6724 config->config_table[0].cam_entry.middle_mac_addr =
6725 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6726 config->config_table[0].cam_entry.lsb_mac_addr =
6727 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6728 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc 6729 if (set)
6730 config->config_table[0].target_table_entry.flags = 0;
6731 else
6732 CAM_INVALIDATE(config->config_table[0]);
a2fbb9ea 6733 config->config_table[0].target_table_entry.client_id = 0;
6734 config->config_table[0].target_table_entry.vlan_id = 0;
6735
3101c2bc 6736 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6737 (set ? "setting" : "clearing"),
a2fbb9ea 6738 config->config_table[0].cam_entry.msb_mac_addr,
6739 config->config_table[0].cam_entry.middle_mac_addr,
6740 config->config_table[0].cam_entry.lsb_mac_addr);
6741
6742 /* broadcast */
4781bfad 6743 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6744 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6745 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
34f80b04 6746 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
3101c2bc 6747 if (set)
6748 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6749 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
3101c2bc 6750 else
6751 CAM_INVALIDATE(config->config_table[1]);
a2fbb9ea 6752 config->config_table[1].target_table_entry.client_id = 0;
6753 config->config_table[1].target_table_entry.vlan_id = 0;
6754
6755 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6756 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6757 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6758}
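/*
 * Editorial note: the CAM stores the station address as three 16-bit
 * words in network byte order, which is what the swab16() calls above
 * produce on a little-endian host.  A stand-alone user-space check of
 * that byte layout (MAC value made up):
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	uint16_t msb;

	memcpy(&msb, &mac[0], 2);		/* LE load: 0x1000 */
	msb = (uint16_t)(msb << 8 | msb >> 8);	/* swab16:  0x0010 */
	printf("msb word 0x%04x\n", msb);	/* prints 0x0010 */
	return 0;
}
#endif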
6759
3101c2bc 6760static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
34f80b04 6761 {
6762 struct mac_configuration_cmd_e1h *config =
6763 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6764
3101c2bc 6765 if (set && (bp->state != BNX2X_STATE_OPEN)) {
34f80b04 6766 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6767 return;
6768 }
6769
6770 /* CAM allocation for E1H
6771 * unicasts: by func number
6772 * multicast: 20+FUNC*20, 20 each
6773 */
8d9c5f34 6774 config->hdr.length = 1;
34f80b04 6775 config->hdr.offset = BP_FUNC(bp);
0626b899 6776 config->hdr.client_id = bp->fp->cl_id;
34f80b04 6777 config->hdr.reserved1 = 0;
6778
6779 /* primary MAC */
6780 config->config_table[0].msb_mac_addr =
6781 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6782 config->config_table[0].middle_mac_addr =
6783 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6784 config->config_table[0].lsb_mac_addr =
6785 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6786 config->config_table[0].client_id = BP_L_ID(bp);
6787 config->config_table[0].vlan_id = 0;
6788 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc 6789 if (set)
6790 config->config_table[0].flags = BP_PORT(bp);
6791 else
6792 config->config_table[0].flags =
6793 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6794
3101c2bc 6795 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6796 (set ? "setting" : "clearing"),
34f80b04 6797 config->config_table[0].msb_mac_addr,
6798 config->config_table[0].middle_mac_addr,
6799 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6800
6801 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6802 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6803 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6804}
6805
a2fbb9ea 6806 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6807 int *state_p, int poll)
6808 {
6809 /* can take a while if any port is running */
8b3a0f0b 6810 int cnt = 5000;
a2fbb9ea 6811
c14423fe 6812 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6813 poll ? "polling" : "waiting", state, idx);
a2fbb9ea 6814
6815 might_sleep();
34f80b04 6816 while (cnt--) {
a2fbb9ea 6817 if (poll) {
6818 bnx2x_rx_int(bp->fp, 10);
34f80b04 6819 /* if index is different from 0
6820 * the reply for some commands will
3101c2bc 6821 * be on the non default queue
a2fbb9ea 6822 */
6823 if (idx)
6824 bnx2x_rx_int(&bp->fp[idx], 10);
6825 }
a2fbb9ea 6826
3101c2bc 6827 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b 6828 if (*state_p == state) {
6829#ifdef BNX2X_STOP_ON_ERROR
6830 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6831#endif
a2fbb9ea 6832 return 0;
8b3a0f0b 6833 }
a2fbb9ea 6834
a2fbb9ea 6835 msleep(1);
a2fbb9ea 6836 }
6837
a2fbb9ea 6838 /* timeout! */
49d66772 6839 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6840 poll ? "polling" : "waiting", state, idx);
34f80b04 6841 #ifdef BNX2X_STOP_ON_ERROR
6842 bnx2x_panic();
6843#endif
a2fbb9ea 6844
49d66772 6845 return -EBUSY;
a2fbb9ea 6846 }
6847
6848static int bnx2x_setup_leading(struct bnx2x *bp)
6849{
34f80b04 6850 int rc;
a2fbb9ea 6851
c14423fe 6852 /* reset IGU state */
34f80b04 6853 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6854
6855 /* SETUP ramrod */
6856 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6857
34f80b04 6858 /* Wait for completion */
6859 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6860
34f80b04 6861 return rc;
a2fbb9ea 6862 }
6863
6864static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6865{
555f6c78 6866 struct bnx2x_fastpath *fp = &bp->fp[index];
6867
a2fbb9ea 6868 /* reset IGU state */
555f6c78 6869 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6870
228241eb 6871 /* SETUP ramrod */
555f6c78 6872 fp->state = BNX2X_FP_STATE_OPENING;
6873 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6874 fp->cl_id, 0);
a2fbb9ea 6875
6876 /* Wait for completion */
6877 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 6878 &(fp->state), 0);
a2fbb9ea 6879 }
6880
a2fbb9ea 6881static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 6882
8badd27a 6883static void bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 6884{
555f6c78 6885 int num_queues;
a2fbb9ea 6886
8badd27a 6887 switch (int_mode) {
6888 case INT_MODE_INTx:
6889 case INT_MODE_MSI:
555f6c78 6890 num_queues = 1;
6891 bp->num_rx_queues = num_queues;
6892 bp->num_tx_queues = num_queues;
6893 DP(NETIF_MSG_IFUP,
6894 "set number of queues to %d\n", num_queues);
8badd27a 6895 break;
6896
6897 case INT_MODE_MSIX:
6898 default:
555f6c78 6899 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6900 num_queues = min_t(u32, num_online_cpus(),
6901 BNX2X_MAX_QUEUES(bp));
34f80b04 6902 else
555f6c78 6903 num_queues = 1;
6904 bp->num_rx_queues = num_queues;
6905 bp->num_tx_queues = num_queues;
6906 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6907 " number of tx queues to %d\n",
6908 bp->num_rx_queues, bp->num_tx_queues);
2dfe0e1f 6909 /* if we can't use MSI-X we only need one fp,
6910 * so try to enable MSI-X with the requested number of fp's
6911 * and fallback to MSI or legacy INTx with one fp
6912 */
8badd27a 6913 if (bnx2x_enable_msix(bp)) {
34f80b04 6914 /* failed to enable MSI-X */
555f6c78 6915 num_queues = 1;
6916 bp->num_rx_queues = num_queues;
6917 bp->num_tx_queues = num_queues;
6918 if (bp->multi_mode)
6919 BNX2X_ERR("Multi requested but failed to "
6920 "enable MSI-X set number of "
6921 "queues to %d\n", num_queues);
a2fbb9ea 6922 }
8badd27a 6923 break;
a2fbb9ea 6924 }
555f6c78 6925 bp->dev->real_num_tx_queues = bp->num_tx_queues;
8badd27a 6926 }
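/*
 * Editorial note: int_mode here is the module parameter declared near
 * the top of this file (1 forces INT#x, 2 forces MSI, anything else
 * means "try MSI-X first").  Loading with, e.g.,
 *
 *	modprobe bnx2x int_mode=2
 *
 * therefore lands in the first switch case above and pins the driver to
 * a single rx/tx queue pair, while the default path sizes the queues by
 * num_online_cpus() and drops back to one queue if MSI-X setup fails.
 */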
6927
6928static void bnx2x_set_rx_mode(struct net_device *dev);
6929
6930/* must be called with rtnl_lock */
6931static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6932{
6933 u32 load_code;
6934 int i, rc = 0;
6935#ifdef BNX2X_STOP_ON_ERROR
6936 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6937 if (unlikely(bp->panic))
6938 return -EPERM;
6939#endif
6940
6941 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6942
6943 bnx2x_set_int_mode(bp);
c14423fe 6944
a2fbb9ea 6945 if (bnx2x_alloc_mem(bp))
6946 return -ENOMEM;
6947
555f6c78 6948 for_each_rx_queue(bp, i)
7a9b2557 6949 bnx2x_fp(bp, i, disable_tpa) =
6950 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6951
555f6c78 6952 for_each_rx_queue(bp, i)
2dfe0e1f 6953 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6954 bnx2x_poll, 128);
6955
6956#ifdef BNX2X_STOP_ON_ERROR
555f6c78 6957 for_each_rx_queue(bp, i) {
2dfe0e1f 6958 struct bnx2x_fastpath *fp = &bp->fp[i];
6959
6960 fp->poll_no_work = 0;
6961 fp->poll_calls = 0;
6962 fp->poll_max_calls = 0;
6963 fp->poll_complete = 0;
6964 fp->poll_exit = 0;
6965 }
6966#endif
6967 bnx2x_napi_enable(bp);
6968
34f80b04 6969 if (bp->flags & USING_MSIX_FLAG) {
6970 rc = bnx2x_req_msix_irqs(bp);
6971 if (rc) {
6972 pci_disable_msix(bp->pdev);
2dfe0e1f 6973 goto load_error1;
34f80b04 6974 }
6975 } else {
8badd27a 6976 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6977 bnx2x_enable_msi(bp);
34f80b04 6978 bnx2x_ack_int(bp);
6979 rc = bnx2x_req_irq(bp);
6980 if (rc) {
2dfe0e1f 6981 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a 6982 if (bp->flags & USING_MSI_FLAG)
6983 pci_disable_msi(bp->pdev);
2dfe0e1f 6984 goto load_error1;
a2fbb9ea 6985 }
8badd27a 6986 if (bp->flags & USING_MSI_FLAG) {
6987 bp->dev->irq = bp->pdev->irq;
6988 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6989 bp->dev->name, bp->pdev->irq);
6990 }
a2fbb9ea 6991 }
6992
2dfe0e1f 6993 /* Send LOAD_REQUEST command to MCP
6994 Returns the type of LOAD command:
6995 if it is the first port to be initialized
6996 common blocks should be initialized, otherwise - not
6997 */
6998 if (!BP_NOMCP(bp)) {
6999 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7000 if (!load_code) {
7001 BNX2X_ERR("MCP response failure, aborting\n");
7002 rc = -EBUSY;
7003 goto load_error2;
7004 }
7005 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7006 rc = -EBUSY; /* other port in diagnostic mode */
7007 goto load_error2;
7008 }
7009
7010 } else {
7011 int port = BP_PORT(bp);
7012
f5372251 7013 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f 7014 load_count[0], load_count[1], load_count[2]);
7015 load_count[0]++;
7016 load_count[1 + port]++;
f5372251 7017 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f 7018 load_count[0], load_count[1], load_count[2]);
7019 if (load_count[0] == 1)
7020 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7021 else if (load_count[1 + port] == 1)
7022 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7023 else
7024 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7025 }
7026
7027 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7028 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7029 bp->port.pmf = 1;
7030 else
7031 bp->port.pmf = 0;
7032 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7033
a2fbb9ea 7034 /* Initialize HW */
34f80b04 7035 rc = bnx2x_init_hw(bp, load_code);
7036 if (rc) {
a2fbb9ea 7037 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 7038 goto load_error2;
a2fbb9ea 7039 }
7040
a2fbb9ea 7041 /* Setup NIC internals and enable interrupts */
471de716 7042 bnx2x_nic_init(bp, load_code);
a2fbb9ea 7043
7044 /* Send LOAD_DONE command to MCP */
34f80b04 7045 if (!BP_NOMCP(bp)) {
228241eb 7046 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7047 if (!load_code) {
da5a662a 7048 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 7049 rc = -EBUSY;
2dfe0e1f 7050 goto load_error3;
a2fbb9ea 7051 }
7052 }
7053
7054 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7055
34f80b04 7056 rc = bnx2x_setup_leading(bp);
7057 if (rc) {
da5a662a 7058 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 7059 goto load_error3;
34f80b04 7060 }
a2fbb9ea 7061
34f80b04 7062 if (CHIP_IS_E1H(bp))
7063 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 7064 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
34f80b04 7065 bp->state = BNX2X_STATE_DISABLED;
7066 }
a2fbb9ea 7067
34f80b04 7068 if (bp->state == BNX2X_STATE_OPEN)
7069 for_each_nondefault_queue(bp, i) {
7070 rc = bnx2x_setup_multi(bp, i);
7071 if (rc)
2dfe0e1f 7072 goto load_error3;
34f80b04 7073 }
a2fbb9ea 7074
34f80b04 7075 if (CHIP_IS_E1(bp))
3101c2bc 7076 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 7077 else
3101c2bc 7078 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 7079
7080 if (bp->port.pmf)
b5bf9068 7081 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea 7082
7083 /* Start fast path */
34f80b04 7084 switch (load_mode) {
7085 case LOAD_NORMAL:
7086 /* Tx queue should be only reenabled */
555f6c78 7087 netif_tx_wake_all_queues(bp->dev);
2dfe0e1f 7088 /* Initialize the receive filter. */
34f80b04 7089 bnx2x_set_rx_mode(bp->dev);
7090 break;
7091
7092 case LOAD_OPEN:
555f6c78 7093 netif_tx_start_all_queues(bp->dev);
2dfe0e1f 7094 /* Initialize the receive filter. */
34f80b04 7095 bnx2x_set_rx_mode(bp->dev);
34f80b04 7096 break;
a2fbb9ea 7097
34f80b04 7098 case LOAD_DIAG:
2dfe0e1f 7099 /* Initialize the receive filter. */
a2fbb9ea 7100 bnx2x_set_rx_mode(bp->dev);
34f80b04 7101 bp->state = BNX2X_STATE_DIAG;
7102 break;
7103
7104 default:
7105 break;
a2fbb9ea 7106 }
7107
34f80b04 7108 if (!bp->port.pmf)
7109 bnx2x__link_status_update(bp);
7110
a2fbb9ea 7111 /* start the timer */
7112 mod_timer(&bp->timer, jiffies + bp->current_interval);
7113
34f80b04 7114
a2fbb9ea 7115 return 0;
7116
2dfe0e1f 7117 load_error3:
7118 bnx2x_int_disable_sync(bp, 1);
7119 if (!BP_NOMCP(bp)) {
7120 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7121 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7122 }
7123 bp->port.pmf = 0;
7a9b2557 7124 /* Free SKBs, SGEs, TPA pool and driver internals */
7125 bnx2x_free_skbs(bp);
555f6c78 7126 for_each_rx_queue(bp, i)
3196a88a 7127 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 7128 load_error2:
d1014634 7129 /* Release IRQs */
7130 bnx2x_free_irq(bp);
2dfe0e1f 7131 load_error1:
7132 bnx2x_napi_disable(bp);
555f6c78 7133 for_each_rx_queue(bp, i)
7cde1c8b 7134 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea 7135 bnx2x_free_mem(bp);
7136
34f80b04 7137 return rc;
a2fbb9ea 7138 }
7139
7140static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7141{
555f6c78 7142 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea 7143 int rc;
7144
c14423fe 7145 /* halt the connection */
555f6c78 7146 fp->state = BNX2X_FP_STATE_HALTING;
7147 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7148
34f80b04 7149 /* Wait for completion */
a2fbb9ea 7150 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7151 &(fp->state), 1);
c14423fe 7152 if (rc) /* timeout */
a2fbb9ea 7153 return rc;
7154
7155 /* delete cfc entry */
7156 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7157
34f80b04 7158 /* Wait for completion */
7159 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7160 &(fp->state), 1);
34f80b04 7161 return rc;
a2fbb9ea 7162 }
7163
da5a662a 7164static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7165{
4781bfad 7166 __le16 dsb_sp_prod_idx;
c14423fe 7167 /* if the other port is handling traffic,
a2fbb9ea 7168 this can take a lot of time */
34f80b04 7169 int cnt = 500;
7170 int rc;
a2fbb9ea 7171
7172 might_sleep();
7173
7174 /* Send HALT ramrod */
7175 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7176 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7177
34f80b04 7178 /* Wait for completion */
7179 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7180 &(bp->fp[0].state), 1);
7181 if (rc) /* timeout */
da5a662a 7182 return rc;
a2fbb9ea 7183
49d66772 7184 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7185
228241eb 7186 /* Send PORT_DELETE ramrod */
a2fbb9ea 7187 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7188
49d66772 7189 /* Wait for completion to arrive on default status block
a2fbb9ea 7190 we are going to reset the chip anyway
7191 so there is not much to do if this times out
7192 */
34f80b04 7193 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04 7194 if (!cnt) {
7195 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7196 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7197 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7198#ifdef BNX2X_STOP_ON_ERROR
7199 bnx2x_panic();
7200#endif
36e552ab 7201 rc = -EBUSY;
34f80b04 7202 break;
7203 }
7204 cnt--;
da5a662a 7205 msleep(1);
5650d9d4 7206 rmb(); /* Refresh the dsb_sp_prod */
49d66772 7207 }
7208 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7209 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a 7210
7211 return rc;
a2fbb9ea 7212 }
7213
34f80b04 7214 static void bnx2x_reset_func(struct bnx2x *bp)
7215{
7216 int port = BP_PORT(bp);
7217 int func = BP_FUNC(bp);
7218 int base, i;
7219
7220 /* Configure IGU */
7221 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7222 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7223
34f80b04 7224 /* Clear ILT */
7225 base = FUNC_ILT_BASE(func);
7226 for (i = base; i < base + ILT_PER_FUNC; i++)
7227 bnx2x_ilt_wr(bp, i, 0);
7228}
7229
7230static void bnx2x_reset_port(struct bnx2x *bp)
7231{
7232 int port = BP_PORT(bp);
7233 u32 val;
7234
7235 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7236
7237 /* Do not rcv packets to BRB */
7238 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7239 /* Do not direct rcv packets that are not for MCP to the BRB */
7240 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7241 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7242
7243 /* Configure AEU */
7244 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7245
7246 msleep(100);
7247 /* Check for BRB port occupancy */
7248 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7249 if (val)
7250 DP(NETIF_MSG_IFDOWN,
33471629 7251 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04 7252
7253 /* TODO: Close Doorbell port? */
7254}
7255
34f80b04 7256 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7257{
7258 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7259 BP_FUNC(bp), reset_code);
7260
7261 switch (reset_code) {
7262 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7263 bnx2x_reset_port(bp);
7264 bnx2x_reset_func(bp);
7265 bnx2x_reset_common(bp);
7266 break;
7267
7268 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7269 bnx2x_reset_port(bp);
7270 bnx2x_reset_func(bp);
7271 break;
7272
7273 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7274 bnx2x_reset_func(bp);
7275 break;
49d66772 7276
34f80b04 7277 default:
7278 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7279 break;
7280 }
7281}
7282
33471629 7283/* must be called with rtnl_lock */
34f80b04 7284static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7285{
da5a662a 7286 int port = BP_PORT(bp);
a2fbb9ea 7287 u32 reset_code = 0;
da5a662a 7288 int i, cnt, rc;
a2fbb9ea 7289
7290 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7291
228241eb 7292 bp->rx_mode = BNX2X_RX_MODE_NONE;
7293 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7294
f8ef6e44 7295 bnx2x_netif_stop(bp, 1);
e94d8af3 7296
34f80b04 7297 del_timer_sync(&bp->timer);
7298 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7299 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7300 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7301
70b9986c 7302 /* Release IRQs */
7303 bnx2x_free_irq(bp);
7304
555f6c78 7305 /* Wait until tx fastpath tasks complete */
7306 for_each_tx_queue(bp, i) {
228241eb 7307 struct bnx2x_fastpath *fp = &bp->fp[i];
7308
34f80b04 7309 cnt = 1000;
e8b5fc51 7310 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7311
7961f791 7312 bnx2x_tx_int(fp);
34f80b04 7313 if (!cnt) {
7314 BNX2X_ERR("timeout waiting for queue[%d]\n",
7315 i);
7316#ifdef BNX2X_STOP_ON_ERROR
7317 bnx2x_panic();
7318 return -EBUSY;
7319#else
7320 break;
7321#endif
7322 }
7323 cnt--;
da5a662a 7324 msleep(1);
34f80b04 7325 }
228241eb 7326 }
da5a662a 7327 /* Give HW time to discard old tx messages */
7328 msleep(1);
a2fbb9ea 7329
3101c2bc 7330 if (CHIP_IS_E1(bp)) {
7331 struct mac_configuration_cmd *config =
7332 bnx2x_sp(bp, mcast_config);
7333
7334 bnx2x_set_mac_addr_e1(bp, 0);
7335
8d9c5f34 7336 for (i = 0; i < config->hdr.length; i++)
3101c2bc 7337 CAM_INVALIDATE(config->config_table[i]);
7338
8d9c5f34 7339 config->hdr.length = i;
3101c2bc 7340 if (CHIP_REV_IS_SLOW(bp))
7341 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7342 else
7343 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 7344 config->hdr.client_id = bp->fp->cl_id;
3101c2bc 7345 config->hdr.reserved1 = 0;
7346
7347 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7348 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7349 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7350
7351 } else { /* E1H */
65abd74d 7352 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7353
3101c2bc 7354 bnx2x_set_mac_addr_e1h(bp, 0);
7355
7356 for (i = 0; i < MC_HASH_SIZE; i++)
7357 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2 7358
7359 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 7360 }
7361
65abd74d 7362 if (unload_mode == UNLOAD_NORMAL)
7363 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7364
7d0446c2 7365 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7366 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7367
7d0446c2 7368 else if (bp->wol) {
65abd74d 7369 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7370 u8 *mac_addr = bp->dev->dev_addr;
7371 u32 val;
7372 /* The mac address is written to entries 1-4 to
7373 preserve entry 0 which is used by the PMF */
7374 u8 entry = (BP_E1HVN(bp) + 1)*8;
7375
7376 val = (mac_addr[0] << 8) | mac_addr[1];
7377 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7378
7379 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7380 (mac_addr[4] << 8) | mac_addr[5];
7381 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7382
7383 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7384
7385 } else
7386 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7387
34f80b04 7388 /* Close multi and leading connections
7389 Completions for ramrods are collected in a synchronous way */
a2fbb9ea 7390 for_each_nondefault_queue(bp, i)
7391 if (bnx2x_stop_multi(bp, i))
228241eb 7392 goto unload_error;
a2fbb9ea 7393
da5a662a 7394 rc = bnx2x_stop_leading(bp);
7395 if (rc) {
34f80b04 7396 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7397#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7398 return -EBUSY;
da5a662a 7399 #else
7400 goto unload_error;
34f80b04 7401#endif
228241eb 7402 }
7403
7404unload_error:
34f80b04 7405 if (!BP_NOMCP(bp))
228241eb 7406 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 7407 else {
f5372251 7408 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04 7409 load_count[0], load_count[1], load_count[2]);
7410 load_count[0]--;
da5a662a 7411 load_count[1 + port]--;
f5372251 7412 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04 7413 load_count[0], load_count[1], load_count[2]);
7414 if (load_count[0] == 0)
7415 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7416 else if (load_count[1 + port] == 0)
34f80b04
EG
7417 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7418 else
7419 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7420 }
a2fbb9ea 7421
34f80b04 7422 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7423 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7424 bnx2x__link_reset(bp);
a2fbb9ea 7425
7426 /* Reset the chip */
228241eb 7427 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea 7428
7429 /* Report UNLOAD_DONE to MCP */
34f80b04 7430 if (!BP_NOMCP(bp))
a2fbb9ea 7431 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 7432
9a035440 7433 bp->port.pmf = 0;
a2fbb9ea 7434
7a9b2557 7435 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7436 bnx2x_free_skbs(bp);
555f6c78 7437 for_each_rx_queue(bp, i)
3196a88a 7438 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7439 for_each_rx_queue(bp, i)
7cde1c8b 7440 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea 7441 bnx2x_free_mem(bp);
7442
7443 bp->state = BNX2X_STATE_CLOSED;
228241eb 7444
a2fbb9ea 7445 netif_carrier_off(bp->dev);
7446
7447 return 0;
7448}
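/*
 * Editorial note: in the no-MCP (emulation) path above the driver keeps
 * its own reference counts instead of asking the management firmware -
 * load_count[0] tracks functions on the whole chip and
 * load_count[1 + port] tracks functions per port.  Whichever decrement
 * reaches zero selects the widest applicable reset code: last function
 * on the chip -> UNLOAD_COMMON, last on the port -> UNLOAD_PORT,
 * otherwise UNLOAD_FUNCTION, mirroring what the MCP would answer.
 */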
7449
34f80b04 7450 static void bnx2x_reset_task(struct work_struct *work)
7451{
7452 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7453
7454#ifdef BNX2X_STOP_ON_ERROR
7455 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7456 " so reset not done to allow debug dump,\n"
ad361c98 7457 " you will need to reboot when done\n");
34f80b04 7458 return;
7459#endif
7460
7461 rtnl_lock();
7462
7463 if (!netif_running(bp->dev))
7464 goto reset_task_exit;
7465
7466 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7467 bnx2x_nic_load(bp, LOAD_NORMAL);
7468
7469reset_task_exit:
7470 rtnl_unlock();
7471}
7472
a2fbb9ea 7473 /* end of nic load/unload */
7474
7475/* ethtool_ops */
7476
7477/*
7478 * Init service functions
7479 */
7480
f1ef27ef 7481 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7482{
7483 switch (func) {
7484 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7485 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7486 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7487 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7488 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7489 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7490 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7491 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7492 default:
7493 BNX2X_ERR("Unsupported function index: %d\n", func);
7494 return (u32)(-1);
7495 }
7496}
7497
7498static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7499{
7500 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7501
7502 /* Flush all outstanding writes */
7503 mmiowb();
7504
7505 /* Pretend to be function 0 */
7506 REG_WR(bp, reg, 0);
7507 /* Flush the GRC transaction (in the chip) */
7508 new_val = REG_RD(bp, reg);
7509 if (new_val != 0) {
7510 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7511 new_val);
7512 BUG();
7513 }
7514
7515 /* From now we are in the "like-E1" mode */
7516 bnx2x_int_disable(bp);
7517
7518 /* Flush all outstanding writes */
7519 mmiowb();
7520
7521 /* Restore the original function settings */
7522 REG_WR(bp, reg, orig_func);
7523 new_val = REG_RD(bp, reg);
7524 if (new_val != orig_func) {
7525 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7526 orig_func, new_val);
7527 BUG();
7528 }
7529}
7530
7531static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7532{
7533 if (CHIP_IS_E1H(bp))
7534 bnx2x_undi_int_disable_e1h(bp, func);
7535 else
7536 bnx2x_int_disable(bp);
7537}
7538
34f80b04 7539 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7540{
7541 u32 val;
7542
7543 /* Check if there is any driver already loaded */
7544 val = REG_RD(bp, MISC_REG_UNPREPARED);
7545 if (val == 0x1) {
7546 /* Check if it is the UNDI driver
7547 * UNDI driver initializes CID offset for normal bell to 0x7
7548 */
4a37fb66 7549 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04 7550 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7551 if (val == 0x7) {
7552 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7553 /* save our func */
34f80b04 7554 int func = BP_FUNC(bp);
da5a662a 7555 u32 swap_en;
7556 u32 swap_val;
34f80b04 7557
b4661739 7558 /* clear the UNDI indication */
7559 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7560
34f80b04 7561 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7562
7563 /* try unload UNDI on port 0 */
7564 bp->func = 0;
da5a662a 7565 bp->fw_seq =
7566 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7567 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7568 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 7569
7570 /* if UNDI is loaded on the other port */
7571 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7572
da5a662a 7573 /* send "DONE" for previous unload */
7574 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7575
7576 /* unload UNDI on port 1 */
34f80b04 7577 bp->func = 1;
da5a662a 7578 bp->fw_seq =
7579 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7580 DRV_MSG_SEQ_NUMBER_MASK);
7581 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7582
7583 bnx2x_fw_command(bp, reset_code);
34f80b04 7584 }
7585
b4661739 7586 /* now it's safe to release the lock */
7587 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7588
f1ef27ef 7589 bnx2x_undi_int_disable(bp, func);
da5a662a 7590
7591 /* close input traffic and wait for it */
7592 /* Do not rcv packets to BRB */
7593 REG_WR(bp,
7594 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7595 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7596 /* Do not direct rcv packets that are not for MCP to
7597 * the BRB */
7598 REG_WR(bp,
7599 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7600 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7601 /* clear AEU */
7602 REG_WR(bp,
7603 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7604 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7605 msleep(10);
7606
7607 /* save NIG port swap info */
7608 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7609 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04 7610 /* reset device */
7611 REG_WR(bp,
7612 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7613 0xd3ffffff);
34f80b04 7614 REG_WR(bp,
7615 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7616 0x1403);
da5a662a 7617 /* take the NIG out of reset and restore swap values */
7618 REG_WR(bp,
7619 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7620 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7621 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7622 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7623
7624 /* send unload done to the MCP */
7625 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7626
7627 /* restore our func and fw_seq */
7628 bp->func = func;
7629 bp->fw_seq =
7630 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7631 DRV_MSG_SEQ_NUMBER_MASK);
b4661739 7632
7633 } else
7634 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04 7635 }
7636}
7637
7638static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7639{
7640 u32 val, val2, val3, val4, id;
72ce58c3 7641 u16 pmc;
34f80b04 7642
7643 /* Get the chip revision id and number. */
7644 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7645 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7646 id = ((val & 0xffff) << 16);
7647 val = REG_RD(bp, MISC_REG_CHIP_REV);
7648 id |= ((val & 0xf) << 12);
7649 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7650 id |= ((val & 0xff) << 4);
5a40e08e 7651 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04 7652 id |= (val & 0xf);
7653 bp->common.chip_id = id;
7654 bp->link_params.chip_id = bp->common.chip_id;
7655 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7656
1c06328c 7657 val = (REG_RD(bp, 0x2874) & 0x55);
7658 if ((bp->common.chip_id & 0x1) ||
7659 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7660 bp->flags |= ONE_PORT_FLAG;
7661 BNX2X_DEV_INFO("single port device\n");
7662 }
7663
34f80b04 7664 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7665 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7666 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7667 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7668 bp->common.flash_size, bp->common.flash_size);
7669
7670 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7671 bp->link_params.shmem_base = bp->common.shmem_base;
7672 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7673
7674 if (!bp->common.shmem_base ||
7675 (bp->common.shmem_base < 0xA0000) ||
7676 (bp->common.shmem_base >= 0xC0000)) {
7677 BNX2X_DEV_INFO("MCP not active\n");
7678 bp->flags |= NO_MCP_FLAG;
7679 return;
7680 }
7681
7682 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7683 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7684 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7685 BNX2X_ERR("BAD MCP validity signature\n");
7686
7687 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7688 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04 7689
7690 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7691 SHARED_HW_CFG_LED_MODE_MASK) >>
7692 SHARED_HW_CFG_LED_MODE_SHIFT);
7693
c2c8b03e 7694 bp->link_params.feature_config_flags = 0;
7695 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7696 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7697 bp->link_params.feature_config_flags |=
7698 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7699 else
7700 bp->link_params.feature_config_flags &=
7701 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7702
34f80b04 7703 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7704 bp->common.bc_ver = val;
7705 BNX2X_DEV_INFO("bc_ver %X\n", val);
7706 if (val < BNX2X_BC_VER) {
7707 /* for now only warn
7708 * later we might need to enforce this */
7709 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7710 " please upgrade BC\n", BNX2X_BC_VER, val);
7711 }
4d295db0 7712 bp->link_params.feature_config_flags |=
7713 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
7714 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3 7715
7716 if (BP_E1HVN(bp) == 0) {
7717 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7718 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7719 } else {
7720 /* no WOL capability for E1HVN != 0 */
7721 bp->flags |= NO_WOL_FLAG;
7722 }
7723 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 7724 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04 7725
7726 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7727 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7728 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7729 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7730
7731 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7732 val, val2, val3, val4);
7733}
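/*
 * Editorial note: chip_id is packed exactly as the comment above says
 * (num:16-31, rev:12-15, metal:4-11, bond_id:0-3).  A stand-alone
 * user-space check of the packing, with made-up register values:
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t num = 0x164e, rev = 0x1, metal = 0x23, bond = 0x4;
	uint32_t id;

	id  = (num & 0xffff) << 16;	/* 0x164e0000 */
	id |= (rev & 0xf) << 12;	/* 0x00001000 */
	id |= (metal & 0xff) << 4;	/* 0x00000230 */
	id |= (bond & 0xf);		/* 0x00000004 */
	assert(id == 0x164e1234);
	return 0;
}
#endif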
7734
7735static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7736 u32 switch_cfg)
a2fbb9ea 7737{
34f80b04 7738 int port = BP_PORT(bp);
a2fbb9ea 7739 u32 ext_phy_type;
7740
a2fbb9ea 7741 switch (switch_cfg) {
7742 case SWITCH_CFG_1G:
7743 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7744
c18487ee 7745 ext_phy_type =
7746 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 7747 switch (ext_phy_type) {
7748 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7749 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7750 ext_phy_type);
7751
34f80b04 7752 bp->port.supported |= (SUPPORTED_10baseT_Half |
7753 SUPPORTED_10baseT_Full |
7754 SUPPORTED_100baseT_Half |
7755 SUPPORTED_100baseT_Full |
7756 SUPPORTED_1000baseT_Full |
7757 SUPPORTED_2500baseX_Full |
7758 SUPPORTED_TP |
7759 SUPPORTED_FIBRE |
7760 SUPPORTED_Autoneg |
7761 SUPPORTED_Pause |
7762 SUPPORTED_Asym_Pause);
a2fbb9ea 7763 break;
7764
7765 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7766 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7767 ext_phy_type);
7768
34f80b04 7769 bp->port.supported |= (SUPPORTED_10baseT_Half |
7770 SUPPORTED_10baseT_Full |
7771 SUPPORTED_100baseT_Half |
7772 SUPPORTED_100baseT_Full |
7773 SUPPORTED_1000baseT_Full |
7774 SUPPORTED_TP |
7775 SUPPORTED_FIBRE |
7776 SUPPORTED_Autoneg |
7777 SUPPORTED_Pause |
7778 SUPPORTED_Asym_Pause);
a2fbb9ea 7779 break;
7780
7781 default:
7782 BNX2X_ERR("NVRAM config error. "
7783 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7784 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7785 return;
7786 }
7787
34f80b04 7788 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7789 port*0x10);
7790 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7791 break;
7792
7793 case SWITCH_CFG_10G:
7794 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7795
c18487ee 7796 ext_phy_type =
7797 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 7798 switch (ext_phy_type) {
7799 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7800 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7801 ext_phy_type);
7802
34f80b04 7803 bp->port.supported |= (SUPPORTED_10baseT_Half |
7804 SUPPORTED_10baseT_Full |
7805 SUPPORTED_100baseT_Half |
7806 SUPPORTED_100baseT_Full |
7807 SUPPORTED_1000baseT_Full |
7808 SUPPORTED_2500baseX_Full |
7809 SUPPORTED_10000baseT_Full |
7810 SUPPORTED_TP |
7811 SUPPORTED_FIBRE |
7812 SUPPORTED_Autoneg |
7813 SUPPORTED_Pause |
7814 SUPPORTED_Asym_Pause);
a2fbb9ea 7815 break;
7816
589abe3a 7817 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7818 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 7819 ext_phy_type);
f1410647 7820
34f80b04 7821 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 7822 SUPPORTED_1000baseT_Full |
34f80b04 7823 SUPPORTED_FIBRE |
589abe3a 7824 SUPPORTED_Autoneg |
34f80b04 7825 SUPPORTED_Pause |
7826 SUPPORTED_Asym_Pause);
f1410647 7827 break;
7828
589abe3a 7829 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7830 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647 7831 ext_phy_type);
7832
34f80b04 7833 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 7834 SUPPORTED_2500baseX_Full |
34f80b04 7835 SUPPORTED_1000baseT_Full |
589abe3a 7836 SUPPORTED_FIBRE |
7837 SUPPORTED_Autoneg |
7838 SUPPORTED_Pause |
7839 SUPPORTED_Asym_Pause);
7840 break;
7841
7842 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7843 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7844 ext_phy_type);
7845
7846 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 7847 SUPPORTED_FIBRE |
7848 SUPPORTED_Pause |
7849 SUPPORTED_Asym_Pause);
f1410647 7850 break;
7851
589abe3a 7852 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7853 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea 7854 ext_phy_type);
7855
34f80b04 7856 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7857 SUPPORTED_1000baseT_Full |
7858 SUPPORTED_FIBRE |
34f80b04 7859 SUPPORTED_Pause |
7860 SUPPORTED_Asym_Pause);
f1410647 7861 break;
7862
589abe3a 7863 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7864 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee 7865 ext_phy_type);
7866
34f80b04 7867 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 7868 SUPPORTED_1000baseT_Full |
34f80b04 7869 SUPPORTED_Autoneg |
589abe3a 7870 SUPPORTED_FIBRE |
34f80b04 7871 SUPPORTED_Pause |
7872 SUPPORTED_Asym_Pause);
c18487ee 7873 break;
7874
4d295db0 7875 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
7876 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
7877 ext_phy_type);
7878
7879 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7880 SUPPORTED_1000baseT_Full |
7881 SUPPORTED_Autoneg |
7882 SUPPORTED_FIBRE |
7883 SUPPORTED_Pause |
7884 SUPPORTED_Asym_Pause);
7885 break;
7886
f1410647 7887 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7888 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7889 ext_phy_type);
7890
34f80b04 7891 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7892 SUPPORTED_TP |
7893 SUPPORTED_Autoneg |
7894 SUPPORTED_Pause |
7895 SUPPORTED_Asym_Pause);
a2fbb9ea 7896 break;
7897
28577185 7898 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7899 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7900 ext_phy_type);
7901
7902 bp->port.supported |= (SUPPORTED_10baseT_Half |
7903 SUPPORTED_10baseT_Full |
7904 SUPPORTED_100baseT_Half |
7905 SUPPORTED_100baseT_Full |
7906 SUPPORTED_1000baseT_Full |
7907 SUPPORTED_10000baseT_Full |
7908 SUPPORTED_TP |
7909 SUPPORTED_Autoneg |
7910 SUPPORTED_Pause |
7911 SUPPORTED_Asym_Pause);
7912 break;
7913
c18487ee 7914 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7915 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7916 bp->link_params.ext_phy_config);
7917 break;
7918
a2fbb9ea 7919 default:
7920 BNX2X_ERR("NVRAM config error. "
7921 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7922 bp->link_params.ext_phy_config);
a2fbb9ea 7923 return;
7924 }
7925
34f80b04 7926 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7927 port*0x18);
7928 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7929
a2fbb9ea 7930 break;
7931
7932 default:
7933 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7934 bp->port.link_config);
a2fbb9ea 7935 return;
7936 }
34f80b04 7937 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea 7938
7939 /* mask what we support according to speed_cap_mask */
c18487ee 7940 if (!(bp->link_params.speed_cap_mask &
7941 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7942 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7943
c18487ee 7944 if (!(bp->link_params.speed_cap_mask &
7945 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7946 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7947
c18487ee 7948 if (!(bp->link_params.speed_cap_mask &
7949 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7950 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7951
c18487ee 7952 if (!(bp->link_params.speed_cap_mask &
7953 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7954 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7955
c18487ee 7956 if (!(bp->link_params.speed_cap_mask &
7957 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04 7958 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7959 SUPPORTED_1000baseT_Full);
a2fbb9ea 7960
c18487ee 7961 if (!(bp->link_params.speed_cap_mask &
7962 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7963 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7964
c18487ee 7965 if (!(bp->link_params.speed_cap_mask &
7966 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7967 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7968
34f80b04 7969 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea 7970 }
7971
34f80b04 7972static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7973{
c18487ee 7974 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7975
34f80b04 7976 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7977 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7978 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7979 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7980 bp->port.advertising = bp->port.supported;
a2fbb9ea 7981 } else {
c18487ee 7982 u32 ext_phy_type =
7983 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7984
7985 if ((ext_phy_type ==
7986 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7987 (ext_phy_type ==
7988 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7989 /* force 10G, no AN */
c18487ee 7990 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7991 bp->port.advertising =
a2fbb9ea 7992 (ADVERTISED_10000baseT_Full |
7993 ADVERTISED_FIBRE);
7994 break;
7995 }
7996 BNX2X_ERR("NVRAM config error. "
7997 "Invalid link_config 0x%x"
7998 " Autoneg not supported\n",
34f80b04 7999 bp->port.link_config);
a2fbb9ea 8000 return;
8001 }
8002 break;
8003
8004 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 8005 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 8006 bp->link_params.req_line_speed = SPEED_10;
34f80b04 8007 bp->port.advertising = (ADVERTISED_10baseT_Full |
8008 ADVERTISED_TP);
a2fbb9ea 8009 } else {
8010 BNX2X_ERR("NVRAM config error. "
8011 "Invalid link_config 0x%x"
8012 " speed_cap_mask 0x%x\n",
34f80b04 8013 bp->port.link_config,
c18487ee 8014 bp->link_params.speed_cap_mask);
a2fbb9ea 8015 return;
8016 }
8017 break;
8018
8019 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 8020 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee 8021 bp->link_params.req_line_speed = SPEED_10;
8022 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04 8023 bp->port.advertising = (ADVERTISED_10baseT_Half |
8024 ADVERTISED_TP);
a2fbb9ea 8025 } else {
8026 BNX2X_ERR("NVRAM config error. "
8027 "Invalid link_config 0x%x"
8028 " speed_cap_mask 0x%x\n",
34f80b04 8029 bp->port.link_config,
c18487ee 8030 bp->link_params.speed_cap_mask);
a2fbb9ea 8031 return;
8032 }
8033 break;
8034
8035 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 8036 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 8037 bp->link_params.req_line_speed = SPEED_100;
34f80b04 8038 bp->port.advertising = (ADVERTISED_100baseT_Full |
8039 ADVERTISED_TP);
a2fbb9ea 8040 } else {
8041 BNX2X_ERR("NVRAM config error. "
8042 "Invalid link_config 0x%x"
8043 " speed_cap_mask 0x%x\n",
34f80b04 8044 bp->port.link_config,
c18487ee 8045 bp->link_params.speed_cap_mask);
a2fbb9ea 8046 return;
8047 }
8048 break;
8049
8050 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 8051 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee 8052 bp->link_params.req_line_speed = SPEED_100;
8053 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04 8054 bp->port.advertising = (ADVERTISED_100baseT_Half |
8055 ADVERTISED_TP);
a2fbb9ea 8056 } else {
8057 BNX2X_ERR("NVRAM config error. "
8058 "Invalid link_config 0x%x"
8059 " speed_cap_mask 0x%x\n",
34f80b04 8060 bp->port.link_config,
c18487ee 8061 bp->link_params.speed_cap_mask);
a2fbb9ea 8062 return;
8063 }
8064 break;
8065
8066 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 8067 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 8068 bp->link_params.req_line_speed = SPEED_1000;
34f80b04 8069 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8070 ADVERTISED_TP);
a2fbb9ea 8071 } else {
8072 BNX2X_ERR("NVRAM config error. "
8073 "Invalid link_config 0x%x"
8074 " speed_cap_mask 0x%x\n",
34f80b04 8075 bp->port.link_config,
c18487ee 8076 bp->link_params.speed_cap_mask);
a2fbb9ea 8077 return;
8078 }
8079 break;
8080
8081 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8082 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8083 bp->link_params.req_line_speed = SPEED_2500;
34f80b04 8084 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8085 ADVERTISED_TP);
a2fbb9ea 8086 } else {
8087 BNX2X_ERR("NVRAM config error. "
8088 "Invalid link_config 0x%x"
8089 " speed_cap_mask 0x%x\n",
34f80b04 8090 bp->port.link_config,
c18487ee 8091 bp->link_params.speed_cap_mask);
a2fbb9ea 8092 return;
8093 }
8094 break;
8095
8096 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8097 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8098 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8099 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8100 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 8101 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8102 ADVERTISED_FIBRE);
a2fbb9ea 8103 } else {
8104 BNX2X_ERR("NVRAM config error. "
8105 "Invalid link_config 0x%x"
8106 " speed_cap_mask 0x%x\n",
34f80b04 8107 bp->port.link_config,
c18487ee 8108 bp->link_params.speed_cap_mask);
a2fbb9ea 8109 return;
8110 }
8111 break;
8112
8113 default:
8114 BNX2X_ERR("NVRAM config error. "
8115 "BAD link speed link_config 0x%x\n",
34f80b04 8116 bp->port.link_config);
c18487ee 8117 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8118 bp->port.advertising = bp->port.supported;
a2fbb9ea 8119 break;
8120 }
a2fbb9ea 8121
34f80b04 8122 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8123 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8124 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8125 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8126 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8127
c18487ee 8128 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8129 " advertising 0x%x\n",
c18487ee 8130 bp->link_params.req_line_speed,
8131 bp->link_params.req_duplex,
34f80b04 8132 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea 8133 }
8134
34f80b04 8135static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8136{
34f80b04 8137 int port = BP_PORT(bp);
8138 u32 val, val2;
589abe3a 8139 u32 config;
c2c8b03e 8140 u16 i;
a2fbb9ea 8141
c18487ee 8142 bp->link_params.bp = bp;
34f80b04 8143 bp->link_params.port = port;
c18487ee 8144
c18487ee 8145 bp->link_params.lane_config =
a2fbb9ea 8146 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8147 bp->link_params.ext_phy_config =
a2fbb9ea 8148 SHMEM_RD(bp,
8149 dev_info.port_hw_config[port].external_phy_config);
4d295db0 8150 /* BCM8727_NOC => BCM8727 no over current */
8151 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8152 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8153 bp->link_params.ext_phy_config &=
8154 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8155 bp->link_params.ext_phy_config |=
8156 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8157 bp->link_params.feature_config_flags |=
8158 FEATURE_CONFIG_BCM8727_NOC;
8159 }
8160
c18487ee 8161 bp->link_params.speed_cap_mask =
a2fbb9ea 8162 SHMEM_RD(bp,
8163 dev_info.port_hw_config[port].speed_capability_mask);
8164
34f80b04 8165 bp->port.link_config =
a2fbb9ea 8166 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8167
c2c8b03e 8168 /* Get the 4 lanes xgxs config rx and tx */
8169 for (i = 0; i < 2; i++) {
8170 val = SHMEM_RD(bp,
8171 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8172 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8173 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8174
8175 val = SHMEM_RD(bp,
8176 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8177 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8178 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8179 }
8180
3ce2c3f9 8181 /* If the device is capable of WoL, set the default state according
8182 * to the HW
8183 */
4d295db0 8184 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9 8185 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8186 (config & PORT_FEATURE_WOL_ENABLED));
8187
c2c8b03e 8188 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8189 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee 8190 bp->link_params.lane_config,
8191 bp->link_params.ext_phy_config,
34f80b04 8192 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8193
4d295db0 8194 bp->link_params.switch_cfg |= (bp->port.link_config &
8195 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8196 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea 8197
8198 bnx2x_link_settings_requested(bp);
8199
8200 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8201 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8202 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8203 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8204 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8205 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8206 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8207 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee 8208 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8209 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04 8210 }
8211
8212static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8213{
8214 int func = BP_FUNC(bp);
8215 u32 val, val2;
8216 int rc = 0;
a2fbb9ea 8217
34f80b04 8218 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8219
34f80b04 8220 bp->e1hov = 0;
8221 bp->e1hmf = 0;
8222 if (CHIP_IS_E1H(bp)) {
8223 bp->mf_config =
8224 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8225
3196a88a 8226 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8227 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 8228 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 8229
34f80b04 8230 bp->e1hov = val;
8231 bp->e1hmf = 1;
8232 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8233 "(0x%04x)\n",
8234 func, bp->e1hov, bp->e1hov);
8235 } else {
f5372251 8236 BNX2X_DEV_INFO("single function mode\n");
34f80b04 8237 if (BP_E1HVN(bp)) {
8238 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8239 " aborting\n", func);
8240 rc = -EPERM;
8241 }
8242 }
8243 }
a2fbb9ea 8244
34f80b04 8245 if (!BP_NOMCP(bp)) {
8246 bnx2x_get_port_hwinfo(bp);
8247
8248 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8249 DRV_MSG_SEQ_NUMBER_MASK);
8250 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8251 }
8252
8253 if (IS_E1HMF(bp)) {
8254 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8255 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8256 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8257 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8258 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8259 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8260 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8261 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8262 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8263 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8264 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8265 ETH_ALEN);
8266 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8267 ETH_ALEN);
a2fbb9ea 8268 }
34f80b04 8269
8270 return rc;
a2fbb9ea 8271 }
8272
34f80b04 8273 if (BP_NOMCP(bp)) {
8274 /* only supposed to happen on emulation/FPGA */
33471629 8275 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04 8276 random_ether_addr(bp->dev->dev_addr);
8277 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8278 }
a2fbb9ea 8279
34f80b04 8280 return rc;
8281 }
8282
8283static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8284{
8285 int func = BP_FUNC(bp);
87942b46 8286 int timer_interval;
34f80b04 8287 int rc;
8288
da5a662a 8289 /* Disable interrupt handling until HW is initialized */
8290 atomic_set(&bp->intr_sem, 1);
e1510706 8291 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8292
34f80b04 8293 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 8294
1cf167f2 8295 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04 8296 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8297
8298 rc = bnx2x_get_hwinfo(bp);
8299
8300 /* need to reset chip if undi was active */
8301 if (!BP_NOMCP(bp))
8302 bnx2x_undi_unload(bp);
8303
8304 if (CHIP_REV_IS_FPGA(bp))
8305 printk(KERN_ERR PFX "FPGA detected\n");
8306
8307 if (BP_NOMCP(bp) && (func == 0))
8308 printk(KERN_ERR PFX
8309 "MCP disabled, must load devices in order!\n");
8310
555f6c78 8311 /* Set multi queue mode */
8badd27a 8312 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8313 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8314 printk(KERN_ERR PFX
8badd27a 8315 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78 8316 multi_mode = ETH_RSS_MODE_DISABLED;
8317 }
8318 bp->multi_mode = multi_mode;
8319
8320
7a9b2557 8321 /* Set TPA flags */
8322 if (disable_tpa) {
8323 bp->flags &= ~TPA_ENABLE_FLAG;
8324 bp->dev->features &= ~NETIF_F_LRO;
8325 } else {
8326 bp->flags |= TPA_ENABLE_FLAG;
8327 bp->dev->features |= NETIF_F_LRO;
8328 }
8329
8d5726c4 8330 bp->mrrs = mrrs;
7a9b2557 8331
34f80b04 8332 bp->tx_ring_size = MAX_TX_AVAIL;
8333 bp->rx_ring_size = MAX_RX_AVAIL;
8334
8335 bp->rx_csum = 1;
34f80b04 8336
8337 bp->tx_ticks = 50;
8338 bp->rx_ticks = 25;
8339
87942b46 8340 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8341 bp->current_interval = (poll ? poll : timer_interval);
34f80b04 8342
8343 init_timer(&bp->timer);
8344 bp->timer.expires = jiffies + bp->current_interval;
8345 bp->timer.data = (unsigned long) bp;
8346 bp->timer.function = bnx2x_timer;
8347
8348 return rc;
a2fbb9ea
ET
8349}
8350
8351/*
8352 * ethtool service functions
8353 */
8354
8355/* All ethtool functions called with rtnl_lock */
8356
8357static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8358{
8359 struct bnx2x *bp = netdev_priv(dev);
8360
34f80b04
EG
8361 cmd->supported = bp->port.supported;
8362 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
8363
8364 if (netif_carrier_ok(dev)) {
c18487ee
YR
8365 cmd->speed = bp->link_vars.line_speed;
8366 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8367 } else {
c18487ee
YR
8368 cmd->speed = bp->link_params.req_line_speed;
8369 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8370 }
34f80b04
EG
8371 if (IS_E1HMF(bp)) {
8372 u16 vn_max_rate;
8373
8374 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8375 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8376 if (vn_max_rate < cmd->speed)
8377 cmd->speed = vn_max_rate;
8378 }
a2fbb9ea 8379
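	/* Derive the reported port type (TP/FIBRE) from the external
	 * PHY fitted on a 10G serdes configuration */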
	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	static u32 regdump_len;
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (regdump_len)
		return regdump_len;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32
bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

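/* The NVRAM is shared between both ports; a per-port request/grant
 * handshake through MCPR_NVM_SW_ARB arbitrates software access. The
 * helpers below poll the grant bit with a timeout that is stretched
 * 100x on slow (emulation/FPGA) chip revisions. */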
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

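/* Read one dword from NVRAM: program the address, issue a command
 * word with the DOIT bit set, then poll the DONE bit for completion.
 * FIRST/LAST flags in cmd_flags delimit a multi-dword burst. */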
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order,
			 * but ethtool sees it as an array of bytes;
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

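/* Read an arbitrary dword-aligned buffer from NVRAM: the first dword
 * is issued with the FIRST flag and the final one with LAST, so the
 * whole transfer is handled as a single burst by the NVM block. */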
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes;
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

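/* Write a dword-aligned buffer to NVRAM; the FIRST/LAST command
 * flags are re-issued on NVRAM page boundaries (NVRAM_PAGE_SIZE) so
 * each page is programmed as its own burst. */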
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars, 1);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

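/* Toggling LRO (TPA) requires a full unload/load cycle since the Rx
 * rings must be re-allocated; note that TPA also depends on Rx
 * checksum offload being enabled. */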
static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA when Rx CSUM is disabled; otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

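/* Offline register test: walk a table of per-port registers, write
 * 0x00000000 and then 0xffffffff to each, and verify that the bits
 * covered by the mask read back as written; the original value is
 * restored afterwards. */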
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   first by writing 0x00000000, then by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that the value read back is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

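/* Offline memory test: read every word of the listed internal
 * memories to trigger parity checking, then verify that no
 * unexpected bits are set in the per-block parity status registers
 * (the e1/e1h masks hide bits that are expected on that chip). */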
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

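/* Offline loopback test: build a test packet (DA = own MAC, payload
 * a known byte pattern), transmit it on queue 0 with the requested
 * loopback mode, and verify that exactly one packet comes back on
 * the Rx side with the expected length and payload. */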
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

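/* Each protected NVRAM area listed below apparently carries its own
 * appended CRC32, so running the CRC over data plus stored CRC
 * yields the fixed residual below for an intact area. */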
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

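/* Online interrupt test: post a harmless SET_MAC ramrod on the
 * slowpath and wait for its completion interrupt; if set_mac_pending
 * is not cleared within ~100ms the interrupt path is considered
 * broken. */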
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
					8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
					8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
					8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
					8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
					8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

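/* Counters are kept as 32-bit words in the hw stats structures:
 * entries with size 4 are exported as-is and entries with size 8 are
 * assembled from a hi/lo pair via HILO_U64; size 0 marks a counter
 * that is skipped and reported as zero. */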
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

10165static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
10166 .get_settings = bnx2x_get_settings,
10167 .set_settings = bnx2x_set_settings,
10168 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
10169 .get_regs_len = bnx2x_get_regs_len,
10170 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
10171 .get_wol = bnx2x_get_wol,
10172 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
10173 .get_msglevel = bnx2x_get_msglevel,
10174 .set_msglevel = bnx2x_set_msglevel,
10175 .nway_reset = bnx2x_nway_reset,
01e53298 10176 .get_link = bnx2x_get_link,
7a9b2557
VZ
10177 .get_eeprom_len = bnx2x_get_eeprom_len,
10178 .get_eeprom = bnx2x_get_eeprom,
10179 .set_eeprom = bnx2x_set_eeprom,
10180 .get_coalesce = bnx2x_get_coalesce,
10181 .set_coalesce = bnx2x_set_coalesce,
10182 .get_ringparam = bnx2x_get_ringparam,
10183 .set_ringparam = bnx2x_set_ringparam,
10184 .get_pauseparam = bnx2x_get_pauseparam,
10185 .set_pauseparam = bnx2x_set_pauseparam,
10186 .get_rx_csum = bnx2x_get_rx_csum,
10187 .set_rx_csum = bnx2x_set_rx_csum,
10188 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10189 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
10190 .set_flags = bnx2x_set_flags,
10191 .get_flags = ethtool_op_get_flags,
10192 .get_sg = ethtool_op_get_sg,
10193 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
10194 .get_tso = ethtool_op_get_tso,
10195 .set_tso = bnx2x_set_tso,
10196 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
10197 .self_test = bnx2x_self_test,
10198 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
10199 .phys_id = bnx2x_phys_id,
10200 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 10201 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
10202};
10203
10204/* end of ethtool_ops */
10205
10206/****************************************************************************
10207* General service functions
10208****************************************************************************/
10209
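/* Move the device between D0 and D3hot by rewriting the power state
 * field of the PMCSR register; the value 3 written in the D3hot branch
 * below is the D3hot encoding of the PCI_PM_CTRL_STATE_MASK field.
 */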
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

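/* The last entry of each RCQ page is a "next page" pointer element
 * rather than a real completion, so when the consumer index taken from
 * the status block lands on it, skip one ahead before comparing.
 */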
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/*
 * net_device service functions
 */

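/* NAPI poll: service Tx completions first, then Rx up to "budget"
 * packets. Only when no work remains may we call napi_complete() and
 * re-enable the IGU interrupt by acking both status block indices.
 */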
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* BNX2X_HAS_WORK() reads the status block, so we need to ensure
	 * that the status block indices have actually been read
	 * (bnx2x_update_fpsb_idx) before this check. Otherwise we could
	 * write a "newer" value of the status block to the IGU: if a DMA
	 * arrived right after BNX2X_HAS_WORK and there were no rmb, the
	 * memory read in bnx2x_update_fpsb_idx could be postponed to just
	 * before bnx2x_ack_sb. In that case there would never be another
	 * interrupt until the status block is updated again, even though
	 * work is still pending.
	 */
	rmb();

	if (!BNX2X_HAS_WORK(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}

/* We split the first BD into a header BD and a data BD to ease the
 * pain of our fellow microcode engineers; one DMA mapping is shared by
 * both BDs. So far this has only been observed to happen in Other
 * Operating Systems(TM).
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping;
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}

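/* Checksum fixup helper (the "HW bug: fixup the CSUM" case below): the
 * device starts checksumming at the transport header, while the stack's
 * checksum start may lie "fix" bytes away from it. Fold those bytes out
 * of (fix > 0) or into (fix < 0) the partial checksum, then byte-swap
 * to the order the FW expects.
 */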
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}

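/* Classify the skb into XMIT_* flag bits: checksum offload (v4/v6,
 * plus XMIT_CSUM_TCP when the L4 protocol is TCP) and GSO type; a
 * return of XMIT_PLAIN means no offload work is needed.
 */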
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}

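/* The FW limit being guarded below: within any window of
 * (MAX_FETCH_BD - 3) consecutive BDs the packet must contribute at
 * least one full MSS of data, otherwise it has to be linearized
 * before transmission.
 */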
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented);
   no need to check fragmentation if page size > 8K (there will be no
   violation of FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
				  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* a non-LSO packet that is too fragmented must
			   always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif

/* called with netif_tx_lock;
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

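	/* Worst-case BD count for this packet is nr_frags + 3: one BD
	 * for the linear data, one parsing BD, and possibly one extra
	 * data BD if the TSO header gets split off (bnx2x_tx_split()).
	 */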
	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation of FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	 * Please read carefully. First we use one BD which we mark as start,
	 * then for TSO or xsum we have a parsing info BD,
	 * and only then we have the rest of the TSO BDs.
	 * (don't forget to mark the last one as last,
	 * and to unmap only AFTER you write to the BD ...)
	 * And above all, all pbd sizes are in words - NOT DWORDS!
	 */

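	/* Resulting BD chain for a TSO packet, as a sketch:
	 *   [start BD: headers] [parsing BD] [data BD(s)] ... [last BD]
	 * with ETH_TX_BD_FLAGS_END_BD set only on the final BD.
	 */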
	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
#endif
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d offset %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

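	/* The linear part is mapped once; if a TSO header split happens
	 * below, bnx2x_tx_split() reuses this same mapping for both the
	 * header BD and the data BD.
	 */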
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

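	/* For LSO: if the linear part holds payload beyond the headers,
	 * split the first BD so the headers travel alone; see the note
	 * at bnx2x_tx_split() on why the microcode wants it this way.
	 */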
	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next-page BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

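/* Rx multicast filtering differs by chip: E1 programs each address
 * into a CAM entry via the SET_MAC ramrod, while E1H sets one bit,
 * selected by a crc32c hash of the address, in the 256-bit MC_HASH
 * register bank (an approximate filter that may accept extras).
 */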
/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

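/* Validate the firmware file layout before using it: every section's
 * {offset, len} pair must stay inside the blob, every init_ops offset
 * must index a real op, and the embedded version must match what this
 * driver was built against.
 */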
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u16 *ops_offsets;
	u32 offset, len, num_ops;
	int i;
	const struct firmware *firmware = bp->firmware;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of"
			       " bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of"
			       " bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
		       BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

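/* The firmware image is stored big-endian; the helpers below convert
 * each section into host byte order. n is the section size in bytes.
 */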
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	u32 i, j, tmp;
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	u16 *target = (u16 *)_target;
	const __be16 *source = (const __be16 *)_source;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes" \
			       " for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + \
		     be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)

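/* Request the FW file and unpack it into bp. The file name is built
 * from the FW_FILE_PREFIX_* prefix plus the four version components,
 * i.e. "bnx2x-e1-<maj>.<min>.<rev>.<eng>.fw" or the e1h equivalent.
 */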
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	bp->tsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	bp->tsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	bp->usem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	bp->usem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	bp->xsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	bp->xsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	bp->csem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	bp->csem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

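/* PCI Error Recovery (EEH) support: the three pci_error_handlers below
 * quiesce the NIC on a detected bus error, bring the device back after
 * the slot reset, and reload the driver once traffic may resume.
 */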
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset	= bnx2x_io_slot_reset,
	.resume		= bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);