/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.113-1"
#define DRV_MODULE_RELDATE	"2009/07/21"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

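/*
 * bnx2x_write_dmae()/bnx2x_read_dmae() below copy len32 dwords between
 * host memory and GRC addresses through a DMAE channel.  The completion
 * handshake: clear the slowpath word wb_comp, post the command with
 * bnx2x_post_dmae(), then poll (about 200 tries, with longer delays on
 * emulation/FPGA) until the hardware writes DMAE_COMP_VAL back.  Until
 * bp->dmae_ready is set, both fall back to indirect register access.
 * See bnx2x_wb_wr() further down for a typical two-dword write through
 * the REG_WR_DMAE() wrapper.
 */
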
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

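/*
 * Scan the assert lists of the four storm processors (X, T, C and U).
 * Each list entry is four consecutive dwords (row0..row3); entries are
 * read until one whose first dword is COMMON_ASM_INVALID_ASSERT_OPCODE
 * is found.  Returns the number of valid asserts printed.
 */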
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

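/*
 * Dump the driver state when a sanity check fails: per-queue index and
 * ring snapshots (a window of BDs/CQEs around each consumer), followed
 * by the firmware scratchpad dump and the storm assert lists.  Statistics
 * are disabled first so they do not race with the dump.
 */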
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

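/*
 * A transmitted packet occupies a chain of buffer descriptors: the first
 * BD (whose nbd field counts the whole chain), an optional parse BD and
 * an optional TSO split-header BD (neither of which carries a DMA
 * mapping), and then one BD per fragment.  bnx2x_free_tx_pkt() below
 * walks that chain, unmapping the data BDs and finally freeing the skb.
 */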
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

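/*
 * Ring-accounting sketch for bnx2x_tx_avail() below: "used" is the BD
 * distance from consumer to producer plus NUM_TX_RINGS, so that the
 * "next-page" BD at the end of each ring page is always counted as
 * consumed.  For example (illustrative numbers only), with a 4096-entry
 * ring, prod = 1000, cons = 900 and NUM_TX_RINGS = 16, used = 116 and
 * 4096 - 116 = 3980 BDs are reported free.
 */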
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

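/*
 * The SGE mask tracks which SGE ring entries are still owned by the
 * hardware: a set bit means the entry is free, a cleared bit means it
 * is in use.  bnx2x_update_sge_prod() above clears the bits for the
 * pages a CQE consumed and then advances rx_sge_prod over every
 * fully-consumed 64-bit mask element (RX_SGE_MASK_ELEM_SZ entries at a
 * time), refilling the mask with ones as it goes.
 */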
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

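/*
 * TPA (transparent packet aggregation, i.e. LRO) flow: on a TPA_START
 * CQE, bnx2x_tpa_start() parks the partially-received skb in the
 * per-queue tpa_pool and hands the firmware a fresh buffer; on the
 * matching TPA_END CQE, bnx2x_tpa_stop() attaches the SGE pages to the
 * pooled skb via bnx2x_fill_frag_skb(), fixes the IP checksum and
 * passes the aggregated packet up the stack.
 */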
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

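/*
 * Main Rx fastpath loop: walk the completion queue until the software
 * consumer catches up with the hardware consumer taken from the status
 * block.  Slowpath CQEs are dispatched to bnx2x_sp_event(); TPA start/end
 * CQEs are routed to bnx2x_tpa_start()/bnx2x_tpa_stop(); small frames
 * (len <= RX_COPY_THRESH when mtu > 1500) are copied into a fresh skb so
 * the original buffer can be recycled in place.
 */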
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

c18487ee 1771/* end of fast path */
a2fbb9ea 1772
bb2a0f7a 1773static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1774
c18487ee
YR
1775/* Link */
1776
1777/*
1778 * General service functions
1779 */
a2fbb9ea 1780
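/*
 * The HW lock registers implement a simple per-resource lock shared by
 * all functions: writing the resource bit to the "set" register
 * (control register + 4) attempts to take the lock, and reading the
 * control register back shows whether the bit is actually held.
 */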
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}

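/*
 * bnx2x_set_gpio() drives a pin by rewriting the FLOAT/SET/CLR fields of
 * MISC_REG_GPIO under the GPIO HW lock: output-low sets the CLR bit,
 * output-high sets the SET bit, and input floats the pin.  The port
 * swap handling mirrors bnx2x_get_gpio() above.
 */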
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

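/*
 * SPIO pins follow the same FLOAT/SET/CLR scheme as the GPIOs above but
 * are shared by the whole chip, so there is no port swap; only SPIO 4-7
 * are valid here.
 */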
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

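/*
 * Translate the IEEE 802.3 pause advertisement resolved by the link code
 * (ieee_fc) back into the ethtool ADVERTISED_Pause/ADVERTISED_Asym_Pause
 * flags reported to user space; e.g. PAUSE_BOTH maps to both flags set.
 */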
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

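/*
 * First link bring-up after load.  Note the flow-control policy below:
 * RX flow control is left off for large MTUs (> 5000) on a
 * single-function device, which is the "jumbo frames" recommendation
 * mentioned in the comment inside.
 */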
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

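/*
 * Per-VN (virtual NIC) companion of bnx2x_init_port_minmax(): converts
 * the min/max bandwidth fields from the MF configuration (in units of
 * 100 Mbps) into the rate-shaping quota and fairness credit that the
 * XSTORM firmware consumes.
 */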
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	mmiowb();

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
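
/*
 * Typical bnx2x_sp_post() usage posts a ramrod whose completion shows up
 * on the fastpath ring, e.g. the statistics query in
 * bnx2x_storm_stats_post() below passes RAMROD_CMD_ID_ETH_STAT_QUERY
 * with the ramrod data split into the hi/lo dwords.
 */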

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
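
/*
 * The return value is a bitmask of which default status block indices
 * changed: 1 - attention bits, 2 - cstorm, 4 - ustorm, 8 - xstorm,
 * 16 - tstorm.  bnx2x_sp_task() only inspects bit 0 and acks the rest.
 */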

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2619
fd4ef40d
EG
2620static inline void bnx2x_fan_failure(struct bnx2x *bp)
2621{
2622 int port = BP_PORT(bp);
2623
2624 /* mark the failure */
2625 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2626 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2627 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2628 bp->link_params.ext_phy_config);
2629
2630 /* log the failure */
2631 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2632 " the driver to shutdown the card to prevent permanent"
2633 " damage. Please contact Dell Support for assistance\n",
2634 bp->dev->name);
2635}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

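/*
 * Slowpath work is deferred to this work-queue task (queued from the
 * interrupt handlers above) so that attention handling may sleep, e.g.
 * while taking the PHY mutex or the MCP access lock.
 */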
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

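/*
 * Illustrative only: with s = 0x00000000:0xffffffff and a = 0x0:0x1,
 * s_lo wraps to 0, the (s_lo < a_lo) test detects the carry, and the
 * result is s = 0x00000001:0x00000000.
 */
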
/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

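/*
 * Illustrative expansion: UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets)
 * takes the 64-bit difference between the fresh MAC snapshot (new->...)
 * and the previous one saved in mac_stx[0], stores the fresh value back
 * into mac_stx[0], and accumulates the delta into the running total in
 * mac_stx[1].  (The statistic names here are just examples.)
 */
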
#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

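/*
 * Kick the DMAE engine.  When several DMAE commands have been staged
 * (bp->executer_idx > 0) they are executed as a chain: a "loader"
 * command is built here whose completion triggers the next staged
 * command, roughly like a linked list walked by hardware.
 */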
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3516 dmae->comp_val = DMAE_COMP_VAL;
3517
3518 *stats_comp = 0;
a2fbb9ea
ET
3519}
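
/*
 * Illustrative sketch, not part of the driver: every DMAE descriptor built
 * above follows one pattern -- source address, destination address, length
 * in 32-bit words, and a completion write.  Intermediate commands complete
 * into the next loader "go" register (dmae_reg_go_c[loader_idx], comp_val 1)
 * so the engine chains to the next command; the final command writes
 * DMAE_COMP_VAL into the stats_comp word in host memory, which the driver
 * polls (and clears here with *stats_comp = 0).  A generic filler in this
 * style, with field names assumed to mirror struct dmae_command, would be:
 *
 *	static void dmae_fill(struct dmae_command *dmae, u32 opcode,
 *			      u32 src_lo, u32 src_hi, u32 dst_lo, u32 dst_hi,
 *			      u16 len, u32 comp_lo, u32 comp_hi, u32 comp_val)
 *	{
 *		dmae->opcode = opcode;
 *		dmae->src_addr_lo = src_lo;
 *		dmae->src_addr_hi = src_hi;
 *		dmae->dst_addr_lo = dst_lo;
 *		dmae->dst_addr_hi = dst_hi;
 *		dmae->len = len;
 *		dmae->comp_addr_lo = comp_lo;
 *		dmae->comp_addr_hi = comp_hi;
 *		dmae->comp_val = comp_val;
 *	}
 */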

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
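
/*
 * Illustrative sketch (assumed shape of the helper macros, not driver
 * code): UPDATE_STAT64/ADD_64 keep each counter as a hi/lo pair of u32s
 * and propagate the carry by hand, since the hardware latches 64-bit
 * counters as two 32-bit halves:
 *
 *	static inline void add64(u32 *s_hi, u32 *s_lo, u32 a_hi, u32 a_lo)
 *	{
 *		*s_lo += a_lo;
 *		*s_hi += a_hi + (*s_lo < a_lo);	// carry if low half wrapped
 *	}
 *
 * The update macros first take the delta between the freshly DMAed value
 * and the last snapshot, then fold that delta into the accumulated
 * mac_stx[1] copy that the rest of the stats code reads.
 */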

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
		qstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
		qstats->total_bytes_received_lo =
		qstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
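
/*
 * Illustrative sketch, not driver code: the three per-storm checks above
 * are a sequence-number handshake.  The driver stamps each statistics
 * query with its current stats_counter; every storm echoes the stamp it
 * served.  A client entry is fresh only when its echoed stamp is exactly
 * one behind the driver's counter:
 *
 *	static bool stats_reply_fresh(u16 echoed, u16 driver_counter)
 *	{
 *		return (u16)(echoed + 1) == driver_counter;
 *	}
 *
 * A stale reply aborts the whole update (return -1/-2/-4), so partially
 * updated firmware buffers are never mixed into the accumulated totals.
 */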

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
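
/*
 * Illustrative sketch: bnx2x_hilo() folds a hi/lo counter pair into one
 * value for the net_device_stats fields above.  Assuming the _hi word
 * immediately precedes _lo (as in struct bnx2x_eth_stats) and a 64-bit
 * kernel, the operation is essentially:
 *
 *	static inline u64 hilo(const u32 *pair)
 *	{
 *		return ((u64)pair[0] << 32) + pair[1];	// pair[0] is _hi
 *	}
 *
 * (on 32-bit kernels the driver's helper returns only the low word, since
 * net_device_stats counters are longs).
 */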

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
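
/*
 * Illustrative usage note: the table above turns the statistics code into
 * a flat state machine -- every (state, event) pair names its action and
 * its successor state side by side, so a transition is one indexed call.
 * For example, a link-up event while enabled simply restarts collection:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 *	// -> bnx2x_stats_restart(bp); state stays STATS_STATE_ENABLED
 *
 * Adding a state or an event means adding a row or a column, with no
 * nested switch statements to keep in sync.
 */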

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
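
/*
 * Illustrative sketch, not driver code: the pulse exchange above is a
 * sequence-number heartbeat.  The driver advances drv_pulse on every timer
 * tick and the management CPU echoes the last value it processed, so a
 * healthy pair differs by 0 (MCP already answered) or 1 (answer pending):
 *
 *	static bool pulse_ok(u32 drv_pulse, u32 mcp_pulse)
 *	{
 *		u32 delta = (drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK;
 *		return delta <= 1;
 *	}
 *
 * Any other delta means one side stopped ticking, which is what the
 * BNX2X_ERR() above reports.
 */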

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
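
/*
 * Illustrative note: each status block "section" is a 64-bit DMA address
 * programmed into the storm's internal memory as two 32-bit REG_WR()s.
 * U64_LO()/U64_HI() (defined in the driver headers) are conceptually:
 *
 *	#define U64_LO(x)	((u32)((u64)(x) & 0xffffffff))
 *	#define U64_HI(x)	((u32)((u64)(x) >> 32))
 *
 * The per-index HC_DISABLE writes start every index with host coalescing
 * disabled until bnx2x_update_coalesce() programs real timeouts.
 */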

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}
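
/*
 * Illustrative note (assumption about units): rx_ticks/tx_ticks hold the
 * requested coalescing interval in microseconds, while the host-coalescing
 * timeout field appears to count in 12-usec hardware ticks -- hence the
 * /12 scaling.  A zero result leaves the timeout unused and sets the
 * companion disable flag instead, e.g. for the RX side:
 *
 *	timeout = bp->rx_ticks / 12;		// 0 means "no coalescing"
 *	REG_WR8(bp, hc_timeout_off, timeout);
 *	REG_WR16(bp, hc_disable_off, timeout ? 0 : 1);
 */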

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
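
/*
 * Illustrative sketch of the "next page" linkage set up above: each ring
 * spans several BCM_PAGE_SIZE pages, and the tail slot(s) of every page
 * hold the DMA address of the following page so the hardware can walk the
 * ring unaided.  Generically (names assumed):
 *
 *	for (i = 1; i <= num_pages; i++) {
 *		link = &ring[entries_per_page * i - 1];	// tail of page i-1
 *		next = base + BCM_PAGE_SIZE * (i % num_pages);
 *		link->addr_hi = cpu_to_le32(U64_HI(next));
 *		link->addr_lo = cpu_to_le32(U64_LO(next));
 *	}
 *
 * The i % num_pages wrap makes the final page point back at the first,
 * closing the ring; the BD and SGE rings above reserve two tail slots per
 * page (hence their "- 2"), the first of which carries the pointer.
 */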

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = fp->sb_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}
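
/*
 * Illustrative note: this table is the RSS indirection map -- entry n
 * steers hash bucket n to client (bp->fp->cl_id + n % num_rx_queues), so
 * the RX queues are filled round-robin.  With four queues, for example,
 * the table body repeats the offsets 0, 1, 2, 3, 0, 1, ... across all
 * TSTORM_INDIRECTION_TABLE_SIZE entries.
 */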

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
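
/*
 * Illustrative summary of the masks programmed above ('mask' is this
 * function's client bit, 1 << BP_L_ID(bp)):
 *
 *	mode		ucast		mcast		bcast
 *	NONE		drop_all	drop_all	drop_all
 *	NORMAL		-		-		accept_all
 *	ALLMULTI	-		accept_all	accept_all
 *	PROMISC		accept_all	accept_all	accept_all
 *
 * Where no bit is set ('-'), frames fall through to the normal MAC and
 * multicast-list matching configured elsewhere, which is why NORMAL only
 * needs to force broadcast acceptance.
 */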

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}
	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
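	/* Illustrative: with 4KB SGE pages and PAGES_PER_SGE == 2 (both
	   depend on the build configuration), 8 frags * 8KB = 64KB would
	   overflow the 16-bit field, so the min() against 0xffff caps
	   what is reported to the FW */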
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
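	/* (The thresholds below are in ring-entry units; the reading of
	   this block is that the FW asserts pause when free BDs/CQEs/SGEs
	   fall under the *_low marks and releases it above the *_high
	   marks -- an interpretation from the code, not from a datasheet) */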
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link;
		   until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables =
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (bp->vn_weight_sum)
			bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		else
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
			   " fairness will be disabled\n");
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode - minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

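/* The MCP load_code forms a hierarchy: a COMMON load implies the port and
   function init as well, and a PORT load implies the function init, hence
   the deliberate fall-throughs below */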
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

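/* Flow: bnx2x_gunzip_init() allocates a DMA-coherent output buffer and a
   zlib stream up front; bnx2x_gunzip() then inflates one gzip-wrapped
   firmware blob into that buffer (raw inflate with -MAX_WBITS, after
   manually skipping the gzip header); bnx2x_gunzip_end() releases both */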
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* Some of the internal memories are not directly readable from the
 * driver; to test them we send debug packets.
 */
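/* Rough flow: with the parser's neighbor blocks gated off, a loopback
   packet parks in the BRB; the NIG byte counter (0x10 per 16-byte packet)
   and the PRS packet counter are then polled to verify that both memories
   saw the traffic, first for a single packet and then for a burst of ten */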
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
		MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	/* PXPCS COMMON comes here */
	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	/* EMAC1 COMMON comes here */
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	/* DBU COMMON comes here */
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	/* DBG COMMON comes here */
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	/* Port PXP2 comes here */
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
#endif
	/* Port DQ comes here */
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
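	/* e.g. mtu 9000 in the multi-port case: low = 96 + 9000/64 + 1 = 237,
	   matching (24*1024 + 9000*4)/256 rounded up; high = 237 + 56 */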
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	/* Port PRS comes here */
	bnx2x_init_block(bp, PRS_BLOCK, init_stage);
	/* Port TSDM comes here */
	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	/* Port CSDM comes here */
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	/* Port USDM comes here */
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	/* Port XSDM comes here */
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	/* Port UPB comes here */
	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	/* Port XPB comes here */
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	/* Port CFC comes here */
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	/* Port EMAC0 comes here */
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	/* Port EMAC1 comes here */
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	/* Port DBU comes here */
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	/* Port DBG comes here */
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* support pause requests from USDM, TSDM and BRB */
		REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	/* Port MCP comes here */
	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	/* Port DMAE comes here */
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* The phys address is shifted right 12 bits and has a valid bit (1)
   added to the 53rd bit;
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
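/* e.g. for a DMA address 0x12_3456_7000: ONCHIP_ADDR1 gives 0x01234567
   (address bits 12-43) and ONCHIP_ADDR2 gives 0x00100000 (address bits
   44-63 plus the valid bit at bit 20, i.e. bit 52 of the combined value) */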

#define CNIC_ILT_LINES		0

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_blocks[i], FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	} else
		bp->func_stx = 0;

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

/* send the MCP a request, block until there is a reply */
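/* The handshake is sequence-number based: (command | seq) is written to
   drv_mb_header and fw_mb_header is polled until the FW echoes the same
   seq back; only then does the FW_MSG_CODE_MASK part carry the reply */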
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)
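/* Two allocation flavors: BNX2X_PCI_ALLOC returns DMA-coherent memory for
   rings the chip reads/writes directly, while BNX2X_ALLOC uses vmalloc for
   host-only shadow structures (sw_rx_bd/sw_tx_bd arrays and the like) */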

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	if (is_multi(bp))
		printk(KERN_INFO PFX
		       "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset].vector,
		       bp->msix_table[offset + i - 1].vector);
	else
		printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

6671 if (intr_sem) {
65abd74d 6672 if (netif_running(bp->dev)) {
65abd74d
YG
6673 bnx2x_napi_enable(bp);
6674 bnx2x_int_enable(bp);
555f6c78
EG
6675 if (bp->state == BNX2X_STATE_OPEN)
6676 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
6677 }
6678 }
6679}
6680
static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
}

/*
 * Init service functions
 */

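/* Program (set != 0) or invalidate (set == 0) the primary and broadcast
 * MAC CAM entries of an E1 device via a SET_MAC ramrod.
 */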
static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0  32-63:port1
	 * multicast 64-127:port0  128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

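/* Poll *state_p until the slowpath event handler moves it to the
 * expected state.  In polling mode the RX completion rings are serviced
 * here directly, since the reply for a non-default index arrives on the
 * non-default queue.
 */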
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

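/* Pick the interrupt mode and, with it, the queue count: forced INT#x
 * or MSI run a single queue, while MSI-X sizes the RSS queue set by the
 * number of online CPUs and falls back to one queue if vector
 * allocation fails.
 */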
static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   "  number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X  set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}

static void bnx2x_set_rx_mode(struct net_device *dev);

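/* Bring the NIC up: negotiate the load type with the MCP (or derive it
 * from per-port load counts when there is no MCP), init HW and FW state,
 * open the leading and the additional client connections, program the
 * MAC and start the fastpath.
 */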
/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

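/* Halt client 0 and delete the leading connection.  The PORT_DEL ramrod
 * completes on the default status block, so its producer is polled
 * directly; a timeout is tolerated since the chip is reset right after.
 */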
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

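/* The reset scope is dictated by the MCP response to UNLOAD_REQ:
 * COMMON resets port, function and common blocks, PORT resets port and
 * function, FUNCTION resets only per-function resources.
 */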
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

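/* Tear-down mirror of bnx2x_nic_load(): quiesce the datapath, invalidate
 * the MAC/multicast CAM entries, pick the unload reset code (honouring
 * WoL settings), close all connections and reset the chip.
 */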
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

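/* If a pre-OS UNDI (PXE) driver left the device initialized - detected
 * by the normal-doorbell CID offset it programs (0x7) - unload it on
 * both ports and reset the chip before this driver takes over.
 */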
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					      NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			       (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					      MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

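/* Read the per-chip info shared by both ports: chip ID, flash size,
 * shmem base, bootcode version and WoL capability.
 */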
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

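/* Build the ethtool SUPPORTED_* mask for this port from the NVRAM
 * switch/external-PHY configuration, then trim it against the NVRAM
 * speed capability mask.
 */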
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

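/* Derive the requested line speed, duplex and flow control from the
 * NVRAM link_config, validating each choice against the supported mask
 * built above; an invalid speed falls back to autoneg.
 */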
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

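/* One-time driver state setup at probe time: locks and work items,
 * module parameter sanity checks (multi_mode vs. int_mode, TPA),
 * default ring sizes, coalescing values and the periodic timer.
 */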
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	static u32 regdump_len;
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (regdump_len)
		return regdump_len;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}

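/*
 * The dump written by bnx2x_get_regs() below is a dump_hdr followed by
 * the raw 32-bit values of the registers that are "online" for the
 * running chip revision, walking the same reg_addrs table that was used
 * to size the buffer above.
 */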
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

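/*
 * NVRAM access must be arbitrated between the two ports of a chip, so a
 * per-port request/grant handshake is done through the
 * MCP_REG_MCPR_NVM_SW_ARB register; the grant bit is polled with a
 * timeout that is stretched 100x on emulation/FPGA, where everything
 * runs slower.
 */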
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order,
			 * but ethtool sees it as an array of bytes;
			 * converting to big-endian gives the byte order
			 * ethtool expects */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

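/*
 * Multi-dword reads are issued as a FIRST/.../LAST command sequence so
 * the NVM controller treats them as one burst; bnx2x_nvram_read() checks
 * up front that both the offset and the length are dword-aligned.
 */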
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

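/*
 * A single-byte write is implemented as a read-modify-write of the
 * surrounding dword: the aligned dword is read back, the target byte is
 * patched in via BYTE_OFFSET(), and the dword is written out again.
 */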
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes;
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

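/*
 * ethtool -E multiplexes two very different operations on the magic
 * number: 0x00504859 (the bytes "PHY") routes the buffer to an external
 * PHY firmware download (PMF only, with a link reset/re-init around it),
 * while anything else is a plain NVRAM write.
 */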
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars, 1);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

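/*
 * Offline register test: each entry in reg_tbl below is probed on its
 * port-specific instance (offset0 + port*offset1), written twice (all
 * zeros, then all ones), compared against its writable-bits mask and
 * finally restored to the saved value.
 */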
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   first by writing 0x00000000, then by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that the value read back is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

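/*
 * The loopback test sends one self-addressed packet through a single
 * BD on queue 0 and then polls the tx/rx consumer indices from the
 * status block: the packet must have been consumed on tx, must show up
 * on rx without error flags, and its length and payload pattern must
 * match what was sent.
 */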
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

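/*
 * Each nvram_tbl region below appears to carry its own CRC32 at the
 * end, so running ether_crc_le() over data-plus-CRC of an intact region
 * yields the constant CRC-32 residual 0xdebb20e3; any other result
 * indicates corruption.
 */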
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
					8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
					8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
					8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
					8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
					8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
	(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

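/*
 * The string, count and value callbacks below must agree on one layout:
 * with multiple queues the buffer holds BNX2X_NUM_Q_STATS entries per
 * queue followed by the global stats (unless hidden in E1H MF mode);
 * in single-queue mode port-wide stats are filtered out according to
 * IS_E1HMF_MODE_STAT().
 */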
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

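/* Each loop iteration above toggles the LED and sleeps 500 ms, so
 * "ethtool -p <dev> N" blinks the port LED for roughly N seconds
 * (data == 0 falls back to the 2-second default).
 */
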
static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

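/* In PMCSR the low two bits select the power state
 * (PCI_PM_CTRL_STATE_MASK == 0x3): writing 3 requests D3hot, while the
 * PCI_D0 branch clears them to 0 and acknowledges any pending PME.
 */
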
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

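/* The increment above steps over the last entry of an RCQ page, which is
 * reserved for a next-page link rather than a real completion, so an index
 * landing on a page boundary is compared against the next usable slot.
 */
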
/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* BNX2X_HAS_WORK() reads the status block, thus we need to
	 * ensure that status block indices have been actually read
	 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
	 * so that we won't write the "newer" value of the status block to IGU
	 * (if there was a DMA right after BNX2X_HAS_WORK and
	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
	 * may be postponed to right before bnx2x_ack_sb). In this case
	 * there will never be another interrupt until there is another update
	 * of the status block, while there is still unhandled work.
	 */
	rmb();

	if (!BNX2X_HAS_WORK(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}

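/* NAPI contract recap for the poll routine above: when the full budget was
 * consumed it returns without calling napi_complete(), so the core keeps
 * polling; interrupts are re-armed (IGU_INT_ENABLE) only on the
 * no-more-work path.
 */
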
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}

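/* Reading bnx2x_csum_fix: "fix" is the signed distance between the stack's
 * checksum start and the transport header (SKB_CS_OFF at the call site).
 * A positive fix means the partial checksum covers "fix" extra bytes before
 * the transport header, so their sum is subtracted out; a negative fix adds
 * the missing bytes back in.  The result is byte-swapped for the chip.
 */
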
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}

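/* For example, a TSO TCP/IPv4 skb with CHECKSUM_PARTIAL comes back as
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while a pre-checksummed
 * frame is just XMIT_PLAIN.
 */
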
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif

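/* Sliding-window intuition for bnx2x_pkt_req_lin (illustrative numbers):
 * the FW fetches at most MAX_FETCH_BD descriptors per packet, three of
 * which are overhead, so every window of (MAX_FETCH_BD - 3) consecutive
 * fragments must carry at least one full MSS of payload.  If, say,
 * wnd_size were 10 and some 10 adjacent frags summed to less than
 * gso_size, the skb is linearized before being handed to the chip.
 */
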
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
#endif
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  offset %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

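/* The E1H multicast path above is a 256-bit hash filter: the top byte of
 * the crc32c of each MAC picks one bit out of MC_HASH_SIZE (8) 32-bit
 * registers; regidx = bit >> 5 selects the register and bit & 0x1f the
 * bit within it.
 */
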
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u16 *ops_offsets;
	u32 offset, len, num_ops;
	int i;
	const struct firmware *firmware = bp->firmware;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of "
			       "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of "
			       "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
		       BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	u32 i, j, tmp;
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
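
/* Example of the decode above: the big-endian pair {0x02000010, 0x00000001}
 * unpacks to op = 0x02, offset = 0x000010, raw_data = 0x00000001.
 */
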
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	u16 *target = (u16 *)_target;
	const __be16 *source = (const __be16 *)_source;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes " \
			       "for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + \
		     be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)

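/* So BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops), as
 * used below, allocates bp->init_ops sized from the file header, converts
 * the big-endian blob via the named helper, and jumps to the given error
 * label on allocation failure.
 */
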
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	bp->tsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	bp->tsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	bp->usem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	bp->usem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	bp->xsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	bp->xsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	bp->csem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	bp->csem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);