/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.105-1"
#define DRV_MODULE_RELDATE	"2009/04/22"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
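
/* Sketch of the DMAE handshake driven by bnx2x_post_dmae() (added as a
 * reading aid; the authoritative flow is the code itself): the command
 * image is copied dword by dword into the engine's command memory at
 * DMAE_REG_CMD_MEM + idx * sizeof(struct dmae_command), and writing 1 to
 * the per-channel GO register hands it to the engine.  Completion is
 * signalled by the engine writing comp_val to comp_addr, which callers
 * such as bnx2x_write_dmae() below poll for.
 */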

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
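
/* Added note on the ack format (layout inferred from the shifts above,
 * so treat it as an assumption rather than HW documentation): the ack is
 * a single dword combining the status block index with
 *
 *	sb_id  << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT
 *	storm  << IGU_ACK_REGISTER_STORM_ID_SHIFT
 *	update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT
 *	op     << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT
 *
 * so one REG_WR both acknowledges the SB index and programs the next
 * interrupt mode (e.g. IGU_INT_ENABLE vs IGU_INT_DISABLE).
 */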

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}
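
	/* Illustrative BD layout for one packet (added comment, not part
	 * of the original code):
	 *
	 *	[start bd][parse bd][TSO split header bd?][frag bd]...
	 *
	 * Only the start bd and the frag bds carry DMA mappings, which is
	 * why the parse bd and the optional split header bd were skipped
	 * above without unmapping, and why the loop below unmaps pages.
	 */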

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
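
/* Worked example for bnx2x_tx_avail() (hypothetical numbers, added as a
 * reading aid): with tx_ring_size = 4096, NUM_TX_RINGS = 16, prod = 100
 * and cons = 80, used = (100 - 80) + 16 = 36 and the function returns
 * 4096 - 36 = 4060.  Always counting the NUM_TX_RINGS "next-page"
 * entries as used reserves them, so the producer can never hand a
 * next-page BD to the hardware as if it were a free descriptor.
 */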

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

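		/* Added illustration of the race the smp_mb() above closes:
		 * without it, the netif_tx_queue_stopped() check below could
		 * be ordered before the tx_bd_cons store, while start_xmit()
		 * on another CPU still sees the old consumer, finds no room
		 * and stops the queue - each side misses the other's update
		 * and the queue is never woken.
		 */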
		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
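
/* Added illustration (assumes the ring geometry implied by the loop
 * above): each SGE page holds RX_SGE_CNT entries and the last two of
 * every page are the "next page" link, so they never generate
 * completions and their mask bits are always cleared:
 *
 *	page i: [0] [1] ... [RX_SGE_CNT-3] | [RX_SGE_CNT-2] [RX_SGE_CNT-1]
 *	              usable entries       |   always cleared in the mask
 */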

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
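
/* Added design note on bnx2x_update_rx_prod(): the three producers are
 * written to ustorm internal memory as one ustorm_eth_rx_producers image,
 * dword by dword.  The wmb() guarantees the FW can never see a producer
 * that points at a BD/SGE whose buffer contents are not yet visible, and
 * the mmiowb() keeps producer updates from different CPUs ordered on
 * their way to the chip.  This restates the barrier comments above in
 * one place; it adds no behaviour.
 */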
1460
a2fbb9ea
ET
1461static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1462{
1463 struct bnx2x *bp = fp->bp;
34f80b04 1464 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1465 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1466 int rx_pkt = 0;
1467
1468#ifdef BNX2X_STOP_ON_ERROR
1469 if (unlikely(bp->panic))
1470 return 0;
1471#endif
1472
34f80b04
EG
1473 /* CQ "next element" is of the size of the regular element,
1474 that's why it's ok here */
a2fbb9ea
ET
1475 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1476 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1477 hw_comp_cons++;
1478
1479 bd_cons = fp->rx_bd_cons;
1480 bd_prod = fp->rx_bd_prod;
34f80b04 1481 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1482 sw_comp_cons = fp->rx_comp_cons;
1483 sw_comp_prod = fp->rx_comp_prod;
1484
1485 /* Memory barrier necessary as speculative reads of the rx
1486 * buffer can be ahead of the index in the status block
1487 */
1488 rmb();
1489
1490 DP(NETIF_MSG_RX_STATUS,
1491 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
0626b899 1492 fp->index, hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1493
1494 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1495 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1496 struct sk_buff *skb;
1497 union eth_rx_cqe *cqe;
34f80b04
EG
1498 u8 cqe_fp_flags;
1499 u16 len, pad;
a2fbb9ea
ET
1500
1501 comp_ring_cons = RCQ_BD(sw_comp_cons);
1502 bd_prod = RX_BD(bd_prod);
1503 bd_cons = RX_BD(bd_cons);
1504
1505 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1506 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1507
a2fbb9ea 1508 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1509 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1510 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1511 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1512 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1513 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1514
1515 /* is this a slowpath msg? */
34f80b04 1516 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1517 bnx2x_sp_event(fp, cqe);
1518 goto next_cqe;
1519
1520 /* this is an rx packet */
1521 } else {
1522 rx_buf = &fp->rx_buf_ring[bd_cons];
1523 skb = rx_buf->skb;
a2fbb9ea
ET
1524 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1525 pad = cqe->fast_path_cqe.placement_offset;
1526
7a9b2557
VZ
1527 /* If CQE is marked both TPA_START and TPA_END
1528 it is a non-TPA CQE */
1529 if ((!fp->disable_tpa) &&
1530 (TPA_TYPE(cqe_fp_flags) !=
1531 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1532 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1533
1534 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1535 DP(NETIF_MSG_RX_STATUS,
1536 "calling tpa_start on queue %d\n",
1537 queue);
1538
1539 bnx2x_tpa_start(fp, queue, skb,
1540 bd_cons, bd_prod);
1541 goto next_rx;
1542 }
1543
1544 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1545 DP(NETIF_MSG_RX_STATUS,
1546 "calling tpa_stop on queue %d\n",
1547 queue);
1548
1549 if (!BNX2X_RX_SUM_FIX(cqe))
1550 BNX2X_ERR("STOP on none TCP "
1551 "data\n");
1552
1553 /* This is a size of the linear data
1554 on this skb */
1555 len = le16_to_cpu(cqe->fast_path_cqe.
1556 len_on_bd);
1557 bnx2x_tpa_stop(bp, fp, queue, pad,
1558 len, cqe, comp_ring_cons);
1559#ifdef BNX2X_STOP_ON_ERROR
1560 if (bp->panic)
17cb4006 1561 return 0;
7a9b2557
VZ
1562#endif
1563
1564 bnx2x_update_sge_prod(fp,
1565 &cqe->fast_path_cqe);
1566 goto next_cqe;
1567 }
1568 }
1569
a2fbb9ea
ET
1570 pci_dma_sync_single_for_device(bp->pdev,
1571 pci_unmap_addr(rx_buf, mapping),
1572 pad + RX_COPY_THRESH,
1573 PCI_DMA_FROMDEVICE);
1574 prefetch(skb);
1575 prefetch(((char *)(skb)) + 128);
1576
1577 /* is this an error packet? */
34f80b04 1578 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1579 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1580 "ERROR flags %x rx packet %u\n",
1581 cqe_fp_flags, sw_comp_cons);
de832a55 1582 fp->eth_q_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1583 goto reuse_rx;
1584 }
1585
1586 /* Since we don't have a jumbo ring
1587 * copy small packets if mtu > 1500
1588 */
1589 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1590 (len <= RX_COPY_THRESH)) {
1591 struct sk_buff *new_skb;
1592
1593 new_skb = netdev_alloc_skb(bp->dev,
1594 len + pad);
1595 if (new_skb == NULL) {
1596 DP(NETIF_MSG_RX_ERR,
34f80b04 1597 "ERROR packet dropped "
a2fbb9ea 1598 "because of alloc failure\n");
de832a55 1599 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1600 goto reuse_rx;
1601 }
1602
1603 /* aligned copy */
1604 skb_copy_from_linear_data_offset(skb, pad,
1605 new_skb->data + pad, len);
1606 skb_reserve(new_skb, pad);
1607 skb_put(new_skb, len);
1608
1609 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1610
1611 skb = new_skb;
1612
1613 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1614 pci_unmap_single(bp->pdev,
1615 pci_unmap_addr(rx_buf, mapping),
437cf2f1 1616 bp->rx_buf_size,
a2fbb9ea
ET
1617 PCI_DMA_FROMDEVICE);
1618 skb_reserve(skb, pad);
1619 skb_put(skb, len);
1620
1621 } else {
1622 DP(NETIF_MSG_RX_ERR,
34f80b04 1623 "ERROR packet dropped because "
a2fbb9ea 1624 "of alloc failure\n");
de832a55 1625 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1626reuse_rx:
1627 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1628 goto next_rx;
1629 }
1630
1631 skb->protocol = eth_type_trans(skb, bp->dev);
1632
1633 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1634 if (bp->rx_csum) {
1adcd8be
EG
1635 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1636 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1637 else
de832a55 1638 fp->eth_q_stats.hw_csum_err++;
66e855f3 1639 }
a2fbb9ea
ET
1640 }
1641
748e5439 1642 skb_record_rx_queue(skb, fp->index);
a2fbb9ea 1643#ifdef BCM_VLAN
0c6671b0 1644 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1645 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1646 PARSING_FLAGS_VLAN))
a2fbb9ea
ET
1647 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1648 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1649 else
1650#endif
34f80b04 1651 netif_receive_skb(skb);
a2fbb9ea 1652
a2fbb9ea
ET
1653
1654next_rx:
1655 rx_buf->skb = NULL;
1656
1657 bd_cons = NEXT_RX_IDX(bd_cons);
1658 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1659 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1660 rx_pkt++;
a2fbb9ea
ET
1661next_cqe:
1662 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1663 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1664
34f80b04 1665 if (rx_pkt == budget)
a2fbb9ea
ET
1666 break;
1667 } /* while */
1668
1669 fp->rx_bd_cons = bd_cons;
34f80b04 1670 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1671 fp->rx_comp_cons = sw_comp_cons;
1672 fp->rx_comp_prod = sw_comp_prod;
1673
7a9b2557
VZ
1674 /* Update producers */
1675 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1676 fp->rx_sge_prod);
a2fbb9ea
ET
1677
1678 fp->rx_pkt += rx_pkt;
1679 fp->rx_calls++;
1680
1681 return rx_pkt;
1682}
1683
1684static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1685{
1686 struct bnx2x_fastpath *fp = fp_cookie;
1687 struct bnx2x *bp = fp->bp;
0626b899 1688 int index = fp->index;
a2fbb9ea 1689
da5a662a
VZ
1690 /* Return here if interrupt is disabled */
1691 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1692 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1693 return IRQ_HANDLED;
1694 }
1695
34f80b04 1696 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
0626b899
EG
1697 index, fp->sb_id);
1698 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1699
1700#ifdef BNX2X_STOP_ON_ERROR
1701 if (unlikely(bp->panic))
1702 return IRQ_HANDLED;
1703#endif
1704
1705 prefetch(fp->rx_cons_sb);
1706 prefetch(fp->tx_cons_sb);
1707 prefetch(&fp->status_blk->c_status_block.status_block_index);
1708 prefetch(&fp->status_blk->u_status_block.status_block_index);
1709
288379f0 1710 napi_schedule(&bnx2x_fp(bp, index, napi));
34f80b04 1711
a2fbb9ea
ET
1712 return IRQ_HANDLED;
1713}
1714
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
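
/*
 * Typical acquire/program/release pattern around a shared hardware
 * resource (an illustrative sketch only; any HW_LOCK_RESOURCE_* value
 * works, exactly as bnx2x_set_spio() below does with
 * HW_LOCK_RESOURCE_SPIO):
 *
 *	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO) == 0) {
 *		... program the shared block ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 *	}
 */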

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
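
/*
 * Example (sketch): force a GPIO pin low and later float it again on the
 * current port -- the same pattern the fan-failure handler below uses:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, BP_PORT(bp));
 *	...
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_INPUT_HI_Z, BP_PORT(bp));
 */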

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

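/*
 * Worked example (illustrative numbers): on a 10000 Mbps link,
 * r_param = 10000/8 = 1250 bytes/usec and, per the comment above,
 * t_fair = T_FAIR_COEF/10000 = 1000 usec, so upper_bound becomes
 * 1250 * 1000 * FAIR_MEM credit bytes for one fairness period.
 */
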
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}

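/*
 * Decode example (sketch): the MF min/max bandwidth fields are in units
 * of 100 Mbps, so a raw FUNC_MF_CFG_MIN_BW field value of 25 decodes to
 * vn_min_rate = 25 * 100 = 2500 Mbps in the function above.
 */
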
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs the port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	mmiowb();

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
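
/*
 * Caller sketch (mirrors bnx2x_storm_stats_post() later in this file):
 * post a ramrod whose 64-bit payload is split into high/low dwords.
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   data_hi, data_lo, 0);
 *	if (rc)
 *		... the SP queue was full (driver panics) ...
 */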

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
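
/*
 * The return value is a bitmask of which default status block indices
 * changed: bit 0 - attention bits; bits 1..4 - the CSTORM, USTORM,
 * XSTORM and TSTORM indices respectively.  Consumer sketch (this is
 * what bnx2x_sp_task() below does with bit 0):
 *
 *	status = bnx2x_update_dsb_idx(bp);
 *	if (status & 0x1)
 *		bnx2x_attn_int(bp);
 */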

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage.  Please contact Dell Support for assistance\n",
	       bp->dev->name);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
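
/*
 * Worked example for the assert/deassert derivation above (values are
 * illustrative): with attn_bits = 0x5, attn_ack = 0x1 and
 * attn_state = 0x1, asserted = 0x5 & ~0x1 & ~0x1 = 0x4 (bit 2 newly
 * raised), while deasserted = ~0x5 & 0x1 & 0x1 = 0x0 (nothing cleared).
 */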

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
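
/*
 * Carry/borrow sketch for the 64-bit helpers above (illustrative values):
 * with s_hi:s_lo = 0x0:0xffffffff, ADD_64(s_hi, 0, s_lo, 1) wraps s_lo
 * to 0 and the (s_lo < a_lo) test adds the carry, giving 0x1:0x0.
 * DIFF_64 performs the matching borrow when the low dword underflows.
 */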

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
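
/*
 * Usage sketch (the field name is illustrative): given a statistics
 * counter laid out as two consecutive u32s (hi then lo), pass the
 * address of the high dword; 64-bit kernels get the full value,
 * 32-bit kernels only the low dword.
 *
 *	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
 */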

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
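
/*
 * Note: the port statistics block is wider than a single DMAE transfer,
 * so the read above is split into a DMAE_LEN32_RD_MAX dword chunk plus
 * the remainder; only the last descriptor completes to host memory with
 * DMAE_COMP_VAL, which bnx2x_stats_comp() polls for.
 */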
3300static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3301{
3302 struct dmae_command *dmae;
34f80b04 3303 int port = BP_PORT(bp);
bb2a0f7a 3304 int vn = BP_E1HVN(bp);
a2fbb9ea 3305 u32 opcode;
bb2a0f7a 3306 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3307 u32 mac_addr;
bb2a0f7a
YG
3308 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3309
3310 /* sanity */
3311 if (!bp->link_vars.link_up || !bp->port.pmf) {
3312 BNX2X_ERR("BUG!\n");
3313 return;
3314 }
a2fbb9ea
ET
3315
3316 bp->executer_idx = 0;
bb2a0f7a
YG
3317
3318 /* MCP */
3319 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3320 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3321 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3322#ifdef __BIG_ENDIAN
bb2a0f7a 3323 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3324#else
bb2a0f7a 3325 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3326#endif
bb2a0f7a
YG
3327 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3328 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3329
bb2a0f7a 3330 if (bp->port.port_stx) {
a2fbb9ea
ET
3331
3332 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3333 dmae->opcode = opcode;
bb2a0f7a
YG
3334 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3335 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3336 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3337 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3338 dmae->len = sizeof(struct host_port_stats) >> 2;
3339 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3340 dmae->comp_addr_hi = 0;
3341 dmae->comp_val = 1;
a2fbb9ea
ET
3342 }
3343
bb2a0f7a
YG
3344 if (bp->func_stx) {
3345
3346 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
                dmae->opcode = opcode;
                dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
                dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
                dmae->dst_addr_lo = bp->func_stx >> 2;
                dmae->dst_addr_hi = 0;
                dmae->len = sizeof(struct host_func_stats) >> 2;
                dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                dmae->comp_addr_hi = 0;
                dmae->comp_val = 1;
        }

        /* MAC */
        opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
                  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                  (vn << DMAE_CMD_E1HVN_SHIFT));

        if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

                mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
                                   NIG_REG_INGRESS_BMAC0_MEM);

                /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
                   BIGMAC_REGISTER_TX_STAT_GTBYT */
                dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
                dmae->opcode = opcode;
                dmae->src_addr_lo = (mac_addr +
                                     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
                dmae->src_addr_hi = 0;
                dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
                dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
                dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
                             BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
                dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                dmae->comp_addr_hi = 0;
                dmae->comp_val = 1;

                /* BIGMAC_REGISTER_RX_STAT_GR64 ..
                   BIGMAC_REGISTER_RX_STAT_GRIPJ */
                dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
                dmae->opcode = opcode;
                dmae->src_addr_lo = (mac_addr +
                                     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
                dmae->src_addr_hi = 0;
                dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
                                offsetof(struct bmac_stats, rx_stat_gr64_lo));
                dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
                                offsetof(struct bmac_stats, rx_stat_gr64_lo));
                dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
                             BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
                dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                dmae->comp_addr_hi = 0;
                dmae->comp_val = 1;

        } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

                mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

                /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
                dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
                dmae->opcode = opcode;
                dmae->src_addr_lo = (mac_addr +
                                     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
                dmae->src_addr_hi = 0;
                dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
                dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
                dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
                dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                dmae->comp_addr_hi = 0;
                dmae->comp_val = 1;

                /* EMAC_REG_EMAC_RX_STAT_AC_28 */
                dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
                dmae->opcode = opcode;
                dmae->src_addr_lo = (mac_addr +
                                     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
                dmae->src_addr_hi = 0;
                dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
                        offsetof(struct emac_stats, rx_stat_falsecarriererrors));
                dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
                        offsetof(struct emac_stats, rx_stat_falsecarriererrors));
                dmae->len = 1;
                dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                dmae->comp_addr_hi = 0;
                dmae->comp_val = 1;

                /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
                dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
                dmae->opcode = opcode;
                dmae->src_addr_lo = (mac_addr +
                                     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
                dmae->src_addr_hi = 0;
                dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
                        offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
                dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
                        offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
                dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
                dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                dmae->comp_addr_hi = 0;
                dmae->comp_val = 1;
        }

        /* NIG */
        dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
                                    NIG_REG_STAT0_BRB_DISCARD) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
        dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
                                    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
                        offsetof(struct nig_stats, egress_mac_pkt0_lo));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
                        offsetof(struct nig_stats, egress_mac_pkt0_lo));
        dmae->len = (2*sizeof(u32)) >> 2;
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (vn << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
                                    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
                        offsetof(struct nig_stats, egress_mac_pkt1_lo));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
                        offsetof(struct nig_stats, egress_mac_pkt1_lo));
        dmae->len = (2*sizeof(u32)) >> 2;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        *stats_comp = 0;
}

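/* Program the single DMAE command that copies this function's statistics
 * block from host memory out to its area in device memory (func_stx);
 * completion is reported through the stats_comp word.
 */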
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
        struct dmae_command *dmae = &bp->stats_dmae;
        u32 *stats_comp = bnx2x_sp(bp, stats_comp);

        /* sanity */
        if (!bp->func_stx) {
                BNX2X_ERR("BUG!\n");
                return;
        }

        bp->executer_idx = 0;
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
        dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
        dmae->dst_addr_lo = bp->func_stx >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = sizeof(struct host_func_stats) >> 2;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        *stats_comp = 0;
}

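/* Kick off statistics collection: the PMF sets up the full port DMAE
 * program, a non-PMF function only its own block; then post the first
 * HW and storm statistics requests.
 */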
static void bnx2x_stats_start(struct bnx2x *bp)
{
        if (bp->port.pmf)
                bnx2x_port_stats_init(bp);

        else if (bp->func_stx)
                bnx2x_func_stats_init(bp);

        bnx2x_hw_stats_post(bp);
        bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
        bnx2x_stats_comp(bp);
        bnx2x_stats_pmf_update(bp);
        bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
        bnx2x_stats_comp(bp);
        bnx2x_stats_start(bp);
}

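/* Fold the freshly DMAE'd BigMAC counters into the port stats and derive
 * the pause-frame totals kept in eth_stats.
 */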
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
        struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
        struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
        struct bnx2x_eth_stats *estats = &bp->eth_stats;
        struct {
                u32 lo;
                u32 hi;
        } diff;

        UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
        UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
        UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
        UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
        UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
        UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
        UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
        UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
        UPDATE_STAT64(tx_stat_gt127,
                                tx_stat_etherstatspkts65octetsto127octets);
        UPDATE_STAT64(tx_stat_gt255,
                                tx_stat_etherstatspkts128octetsto255octets);
        UPDATE_STAT64(tx_stat_gt511,
                                tx_stat_etherstatspkts256octetsto511octets);
        UPDATE_STAT64(tx_stat_gt1023,
                                tx_stat_etherstatspkts512octetsto1023octets);
        UPDATE_STAT64(tx_stat_gt1518,
                                tx_stat_etherstatspkts1024octetsto1522octets);
        UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
        UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
        UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
        UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
        UPDATE_STAT64(tx_stat_gterr,
                                tx_stat_dot3statsinternalmactransmiterrors);
        UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

        estats->pause_frames_received_hi =
                                pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
        estats->pause_frames_received_lo =
                                pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

        estats->pause_frames_sent_hi =
                                pstats->mac_stx[1].tx_stat_outxoffsent_hi;
        estats->pause_frames_sent_lo =
                                pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

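/* Same as above for the EMAC: extend the 32-bit HW counters into the
 * 64-bit port stats and compute the pause-frame totals.
 */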
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
        struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
        struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
        struct bnx2x_eth_stats *estats = &bp->eth_stats;

        UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
        UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
        UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
        UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
        UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
        UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
        UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
        UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
        UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
        UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
        UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
        UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
        UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
        UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
        UPDATE_EXTEND_STAT(tx_stat_outxonsent);
        UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
        UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
        UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
        UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
        UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
        UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
        UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
        UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
        UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
        UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
        UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
        UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
        UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
        UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
        UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
        UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

        estats->pause_frames_received_hi =
                        pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
        estats->pause_frames_received_lo =
                        pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
        ADD_64(estats->pause_frames_received_hi,
               pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
               estats->pause_frames_received_lo,
               pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

        estats->pause_frames_sent_hi =
                        pstats->mac_stx[1].tx_stat_outxonsent_hi;
        estats->pause_frames_sent_lo =
                        pstats->mac_stx[1].tx_stat_outxonsent_lo;
        ADD_64(estats->pause_frames_sent_hi,
               pstats->mac_stx[1].tx_stat_outxoffsent_hi,
               estats->pause_frames_sent_lo,
               pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

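/* Merge the DMAE results for the active MAC plus the NIG block into the
 * port stats; also latch the NIG timer max value reported by the MCP.
 */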
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
        struct nig_stats *new = bnx2x_sp(bp, nig_stats);
        struct nig_stats *old = &(bp->port.old_nig_stats);
        struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
        struct bnx2x_eth_stats *estats = &bp->eth_stats;
        struct {
                u32 lo;
                u32 hi;
        } diff;
        u32 nig_timer_max;

        if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
                bnx2x_bmac_stats_update(bp);

        else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
                bnx2x_emac_stats_update(bp);

        else { /* unreached */
                BNX2X_ERR("stats updated by DMAE but no MAC active\n");
                return -1;
        }

        ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
                      new->brb_discard - old->brb_discard);
        ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
                      new->brb_truncate - old->brb_truncate);

        UPDATE_STAT64_NIG(egress_mac_pkt0,
                                        etherstatspkts1024octetsto1522octets);
        UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

        memcpy(old, new, sizeof(struct nig_stats));

        memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
               sizeof(struct mac_stx));
        estats->brb_drop_hi = pstats->brb_drop_hi;
        estats->brb_drop_lo = pstats->brb_drop_lo;

        pstats->host_port_stats_start = ++pstats->host_port_stats_end;

        nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
        if (nig_timer_max != estats->nig_timer_max) {
                estats->nig_timer_max = nig_timer_max;
                BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
        }

        return 0;
}

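/* Pull the per-client statistics written by the storm processors.  Each
 * client's stats_counter must be exactly one behind bp->stats_counter,
 * otherwise the storms have not finished this round and we return
 * non-zero so the caller can retry on the next tick.  Valid per-queue
 * numbers are then accumulated into the function and ethernet totals.
 */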
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
        struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
        struct tstorm_per_port_stats *tport =
                                        &stats->tstorm_common.port_statistics;
        struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
        struct bnx2x_eth_stats *estats = &bp->eth_stats;
        int i;

        memset(&(fstats->total_bytes_received_hi), 0,
               sizeof(struct host_func_stats) - 2*sizeof(u32));
        estats->error_bytes_received_hi = 0;
        estats->error_bytes_received_lo = 0;
        estats->etherstatsoverrsizepkts_hi = 0;
        estats->etherstatsoverrsizepkts_lo = 0;
        estats->no_buff_discard_hi = 0;
        estats->no_buff_discard_lo = 0;

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                int cl_id = fp->cl_id;
                struct tstorm_per_client_stats *tclient =
                                &stats->tstorm_common.client_statistics[cl_id];
                struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
                struct ustorm_per_client_stats *uclient =
                                &stats->ustorm_common.client_statistics[cl_id];
                struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
                struct xstorm_per_client_stats *xclient =
                                &stats->xstorm_common.client_statistics[cl_id];
                struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
                struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
                u32 diff;

                /* are storm stats valid? */
                if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
                                                        bp->stats_counter) {
                        DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
                           "  xstorm counter (%d) != stats_counter (%d)\n",
                           i, xclient->stats_counter, bp->stats_counter);
                        return -1;
                }
                if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
                                                        bp->stats_counter) {
                        DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
                           "  tstorm counter (%d) != stats_counter (%d)\n",
                           i, tclient->stats_counter, bp->stats_counter);
                        return -2;
                }
                if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
                                                        bp->stats_counter) {
                        DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
                           "  ustorm counter (%d) != stats_counter (%d)\n",
                           i, uclient->stats_counter, bp->stats_counter);
                        return -4;
                }

                qstats->total_bytes_received_hi =
                qstats->valid_bytes_received_hi =
                                le32_to_cpu(tclient->total_rcv_bytes.hi);
                qstats->total_bytes_received_lo =
                qstats->valid_bytes_received_lo =
                                le32_to_cpu(tclient->total_rcv_bytes.lo);

                qstats->error_bytes_received_hi =
                                le32_to_cpu(tclient->rcv_error_bytes.hi);
                qstats->error_bytes_received_lo =
                                le32_to_cpu(tclient->rcv_error_bytes.lo);

                ADD_64(qstats->total_bytes_received_hi,
                       qstats->error_bytes_received_hi,
                       qstats->total_bytes_received_lo,
                       qstats->error_bytes_received_lo);

                UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
                                        total_unicast_packets_received);
                UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
                                        total_multicast_packets_received);
                UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
                                        total_broadcast_packets_received);
                UPDATE_EXTEND_TSTAT(packets_too_big_discard,
                                        etherstatsoverrsizepkts);
                UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

                SUB_EXTEND_USTAT(ucast_no_buff_pkts,
                                        total_unicast_packets_received);
                SUB_EXTEND_USTAT(mcast_no_buff_pkts,
                                        total_multicast_packets_received);
                SUB_EXTEND_USTAT(bcast_no_buff_pkts,
                                        total_broadcast_packets_received);
                UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
                UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
                UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

                qstats->total_bytes_transmitted_hi =
                                le32_to_cpu(xclient->total_sent_bytes.hi);
                qstats->total_bytes_transmitted_lo =
                                le32_to_cpu(xclient->total_sent_bytes.lo);

                UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
                                        total_unicast_packets_transmitted);
                UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
                                        total_multicast_packets_transmitted);
                UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
                                        total_broadcast_packets_transmitted);

                old_tclient->checksum_discard = tclient->checksum_discard;
                old_tclient->ttl0_discard = tclient->ttl0_discard;

                ADD_64(fstats->total_bytes_received_hi,
                       qstats->total_bytes_received_hi,
                       fstats->total_bytes_received_lo,
                       qstats->total_bytes_received_lo);
                ADD_64(fstats->total_bytes_transmitted_hi,
                       qstats->total_bytes_transmitted_hi,
                       fstats->total_bytes_transmitted_lo,
                       qstats->total_bytes_transmitted_lo);
                ADD_64(fstats->total_unicast_packets_received_hi,
                       qstats->total_unicast_packets_received_hi,
                       fstats->total_unicast_packets_received_lo,
                       qstats->total_unicast_packets_received_lo);
                ADD_64(fstats->total_multicast_packets_received_hi,
                       qstats->total_multicast_packets_received_hi,
                       fstats->total_multicast_packets_received_lo,
                       qstats->total_multicast_packets_received_lo);
                ADD_64(fstats->total_broadcast_packets_received_hi,
                       qstats->total_broadcast_packets_received_hi,
                       fstats->total_broadcast_packets_received_lo,
                       qstats->total_broadcast_packets_received_lo);
                ADD_64(fstats->total_unicast_packets_transmitted_hi,
                       qstats->total_unicast_packets_transmitted_hi,
                       fstats->total_unicast_packets_transmitted_lo,
                       qstats->total_unicast_packets_transmitted_lo);
                ADD_64(fstats->total_multicast_packets_transmitted_hi,
                       qstats->total_multicast_packets_transmitted_hi,
                       fstats->total_multicast_packets_transmitted_lo,
                       qstats->total_multicast_packets_transmitted_lo);
                ADD_64(fstats->total_broadcast_packets_transmitted_hi,
                       qstats->total_broadcast_packets_transmitted_hi,
                       fstats->total_broadcast_packets_transmitted_lo,
                       qstats->total_broadcast_packets_transmitted_lo);
                ADD_64(fstats->valid_bytes_received_hi,
                       qstats->valid_bytes_received_hi,
                       fstats->valid_bytes_received_lo,
                       qstats->valid_bytes_received_lo);

                ADD_64(estats->error_bytes_received_hi,
                       qstats->error_bytes_received_hi,
                       estats->error_bytes_received_lo,
                       qstats->error_bytes_received_lo);
                ADD_64(estats->etherstatsoverrsizepkts_hi,
                       qstats->etherstatsoverrsizepkts_hi,
                       estats->etherstatsoverrsizepkts_lo,
                       qstats->etherstatsoverrsizepkts_lo);
                ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
                       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
        }

        ADD_64(fstats->total_bytes_received_hi,
               estats->rx_stat_ifhcinbadoctets_hi,
               fstats->total_bytes_received_lo,
               estats->rx_stat_ifhcinbadoctets_lo);

        memcpy(estats, &(fstats->total_bytes_received_hi),
               sizeof(struct host_func_stats) - 2*sizeof(u32));

        ADD_64(estats->etherstatsoverrsizepkts_hi,
               estats->rx_stat_dot3statsframestoolong_hi,
               estats->etherstatsoverrsizepkts_lo,
               estats->rx_stat_dot3statsframestoolong_lo);
        ADD_64(estats->error_bytes_received_hi,
               estats->rx_stat_ifhcinbadoctets_hi,
               estats->error_bytes_received_lo,
               estats->rx_stat_ifhcinbadoctets_lo);

        if (bp->port.pmf) {
                estats->mac_filter_discard =
                                le32_to_cpu(tport->mac_filter_discard);
                estats->xxoverflow_discard =
                                le32_to_cpu(tport->xxoverflow_discard);
                estats->brb_truncate_discard =
                                le32_to_cpu(tport->brb_truncate_discard);
                estats->mac_discard = le32_to_cpu(tport->mac_discard);
        }

        fstats->host_func_stats_start = ++fstats->host_func_stats_end;

        bp->stats_pending = 0;

        return 0;
}

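/* Fill the netdev stats structure from the accumulated driver stats. */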
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
        struct bnx2x_eth_stats *estats = &bp->eth_stats;
        struct net_device_stats *nstats = &bp->dev->stats;
        int i;

        nstats->rx_packets =
                bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
                bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
                bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

        nstats->tx_packets =
                bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
                bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
                bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

        nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

        nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

        nstats->rx_dropped = estats->mac_discard;
        for_each_queue(bp, i)
                nstats->rx_dropped +=
                        le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

        nstats->tx_dropped = 0;

        nstats->multicast =
                bnx2x_hilo(&estats->total_multicast_packets_received_hi);

        nstats->collisions =
                bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

        nstats->rx_length_errors =
                bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
                bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
        nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
                                 bnx2x_hilo(&estats->brb_truncate_hi);
        nstats->rx_crc_errors =
                bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
        nstats->rx_frame_errors =
                bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
        nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
        nstats->rx_missed_errors = estats->xxoverflow_discard;

        nstats->rx_errors = nstats->rx_length_errors +
                            nstats->rx_over_errors +
                            nstats->rx_crc_errors +
                            nstats->rx_frame_errors +
                            nstats->rx_fifo_errors +
                            nstats->rx_missed_errors;

        nstats->tx_aborted_errors =
                bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
                bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
        nstats->tx_carrier_errors =
                bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
        nstats->tx_fifo_errors = 0;
        nstats->tx_heartbeat_errors = 0;
        nstats->tx_window_errors = 0;

        nstats->tx_errors = nstats->tx_aborted_errors +
                            nstats->tx_carrier_errors +
            bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

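/* Sum the driver-maintained per-queue counters (Xoff events, allocation
 * and checksum failures) into the global ethernet stats.
 */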
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
        struct bnx2x_eth_stats *estats = &bp->eth_stats;
        int i;

        estats->driver_xoff = 0;
        estats->rx_err_discard_pkt = 0;
        estats->rx_skb_alloc_failed = 0;
        estats->hw_csum_err = 0;
        for_each_queue(bp, i) {
                struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

                estats->driver_xoff += qstats->driver_xoff;
                estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
                estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
                estats->hw_csum_err += qstats->hw_csum_err;
        }
}

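/* Periodic statistics update: runs only once the previous DMAE batch has
 * completed, refreshes HW, storm, netdev and driver stats and then posts
 * the next request.  Three consecutive storm-stats failures are fatal.
 */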
static void bnx2x_stats_update(struct bnx2x *bp)
{
        u32 *stats_comp = bnx2x_sp(bp, stats_comp);

        if (*stats_comp != DMAE_COMP_VAL)
                return;

        if (bp->port.pmf)
                bnx2x_hw_stats_update(bp);

        if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
                BNX2X_ERR("storm stats were not updated for 3 times\n");
                bnx2x_panic();
                return;
        }

        bnx2x_net_stats_update(bp);
        bnx2x_drv_stats_update(bp);

        if (bp->msglevel & NETIF_MSG_TIMER) {
                struct tstorm_per_client_stats *old_tclient =
                                                        &bp->fp->old_tclient;
                struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
                struct bnx2x_eth_stats *estats = &bp->eth_stats;
                struct net_device_stats *nstats = &bp->dev->stats;
                int i;

                printk(KERN_DEBUG "%s:\n", bp->dev->name);
                printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
                                  "  tx pkt (%lx)\n",
                       bnx2x_tx_avail(bp->fp),
                       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
                printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
                                  "  rx pkt (%lx)\n",
                       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
                             bp->fp->rx_comp_cons),
                       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
                printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
                                  "brb truncate %u\n",
                       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
                       qstats->driver_xoff,
                       estats->brb_drop_lo, estats->brb_truncate_lo);
                printk(KERN_DEBUG "tstats: checksum_discard %u  "
                        "packets_too_big_discard %lu  no_buff_discard %lu  "
                        "mac_discard %u  mac_filter_discard %u  "
                        "xxoverflow_discard %u  brb_truncate_discard %u  "
                        "ttl0_discard %u\n",
                       le32_to_cpu(old_tclient->checksum_discard),
                       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
                       bnx2x_hilo(&qstats->no_buff_discard_hi),
                       estats->mac_discard, estats->mac_filter_discard,
                       estats->xxoverflow_discard, estats->brb_truncate_discard,
                       le32_to_cpu(old_tclient->ttl0_discard));

                for_each_queue(bp, i) {
                        printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
                               bnx2x_fp(bp, i, tx_pkt),
                               bnx2x_fp(bp, i, rx_pkt),
                               bnx2x_fp(bp, i, rx_calls));
                }
        }

        bnx2x_hw_stats_post(bp);
        bnx2x_storm_stats_post(bp);
}

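/* On stats stop, flush the final port and function statistics from host
 * memory back to device memory via DMAE.
 */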
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
        struct dmae_command *dmae;
        u32 opcode;
        int loader_idx = PMF_DMAE_C(bp);
        u32 *stats_comp = bnx2x_sp(bp, stats_comp);

        bp->executer_idx = 0;

        opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                  DMAE_CMD_C_ENABLE |
                  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

        if (bp->port.port_stx) {

                dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
                if (bp->func_stx)
                        dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
                else
                        dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
                dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
                dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
                dmae->dst_addr_lo = bp->port.port_stx >> 2;
                dmae->dst_addr_hi = 0;
                dmae->len = sizeof(struct host_port_stats) >> 2;
                if (bp->func_stx) {
                        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                        dmae->comp_addr_hi = 0;
                        dmae->comp_val = 1;
                } else {
                        dmae->comp_addr_lo =
                                U64_LO(bnx2x_sp_mapping(bp, stats_comp));
                        dmae->comp_addr_hi =
                                U64_HI(bnx2x_sp_mapping(bp, stats_comp));
                        dmae->comp_val = DMAE_COMP_VAL;

                        *stats_comp = 0;
                }
        }

        if (bp->func_stx) {

                dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
                dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
                dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
                dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
                dmae->dst_addr_lo = bp->func_stx >> 2;
                dmae->dst_addr_hi = 0;
                dmae->len = sizeof(struct host_func_stats) >> 2;
                dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
                dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
                dmae->comp_val = DMAE_COMP_VAL;

                *stats_comp = 0;
        }
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
        int update = 0;

        bnx2x_stats_comp(bp);

        if (bp->port.pmf)
                update = (bnx2x_hw_stats_update(bp) == 0);

        update |= (bnx2x_storm_stats_update(bp) == 0);

        if (update) {
                bnx2x_net_stats_update(bp);

                if (bp->port.pmf)
                        bnx2x_port_stats_stop(bp);

                bnx2x_hw_stats_post(bp);
                bnx2x_stats_comp(bp);
        }
}

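/* Statistics state machine: for each (state, event) pair the table below
 * names the action to run and the state to move to.
 */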
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
        void (*action)(struct bnx2x *bp);
        enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state        event   */
{
/* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
        enum bnx2x_stats_state state = bp->stats_state;

        bnx2x_stats_stm[state][event].action(bp);
        bp->stats_state = bnx2x_stats_stm[state][event].next_state;

        if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
                DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
                   state, event, bp->stats_state);
}

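/* Periodic driver timer: in poll mode services the first fastpath's rings
 * directly, and exchanges the heartbeat pulse with the management CPU
 * (MCP), warning if the two sequence numbers drift apart.
 */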
static void bnx2x_timer(unsigned long data)
{
        struct bnx2x *bp = (struct bnx2x *) data;

        if (!netif_running(bp->dev))
                return;

        if (atomic_read(&bp->intr_sem) != 0)
                goto timer_restart;

        if (poll) {
                struct bnx2x_fastpath *fp = &bp->fp[0];
                int rc;

                bnx2x_tx_int(fp);
                rc = bnx2x_rx_int(fp, 1000);
        }

        if (!BP_NOMCP(bp)) {
                int func = BP_FUNC(bp);
                u32 drv_pulse;
                u32 mcp_pulse;

                ++bp->fw_drv_pulse_wr_seq;
                bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
                /* TBD - add SYSTEM_TIME */
                drv_pulse = bp->fw_drv_pulse_wr_seq;
                SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

                mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
                             MCP_PULSE_SEQ_MASK);
                /* The delta between driver pulse and mcp response
                 * should be 1 (before mcp response) or 0 (after mcp response)
                 */
                if ((drv_pulse != mcp_pulse) &&
                    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
                        /* someone lost a heartbeat... */
                        BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
                                  drv_pulse, mcp_pulse);
                }
        }

        if ((bp->state == BNX2X_STATE_OPEN) ||
            (bp->state == BNX2X_STATE_DISABLED))
                bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

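/* Clear the USTORM and CSTORM areas of a fastpath status block. */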
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
        int port = BP_PORT(bp);

        bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
                        USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
                        sizeof(struct ustorm_status_block)/4);
        bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
                        CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
                        sizeof(struct cstorm_status_block)/4);
}

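/* Point the USTORM and CSTORM parts of a status block at its host DMA
 * address, disable all HC indices, and enable its interrupt in the IGU.
 */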
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
                          dma_addr_t mapping, int sb_id)
{
        int port = BP_PORT(bp);
        int func = BP_FUNC(bp);
        int index;
        u64 section;

        /* USTORM */
        section = ((u64)mapping) + offsetof(struct host_status_block,
                                            u_status_block);
        sb->u_status_block.status_block_id = sb_id;

        REG_WR(bp, BAR_USTRORM_INTMEM +
               USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
        REG_WR(bp, BAR_USTRORM_INTMEM +
               ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
               U64_HI(section));
        REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
                USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

        for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_USTRORM_INTMEM +
                         USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

        /* CSTORM */
        section = ((u64)mapping) + offsetof(struct host_status_block,
                                            c_status_block);
        sb->c_status_block.status_block_id = sb_id;

        REG_WR(bp, BAR_CSTRORM_INTMEM +
               CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
        REG_WR(bp, BAR_CSTRORM_INTMEM +
               ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
               U64_HI(section));
        REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
                CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

        for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_CSTRORM_INTMEM +
                         CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

        bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

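/* Clear the default status block areas in all four storm memories. */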
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);

        bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
                        TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
                        sizeof(struct tstorm_def_status_block)/4);
        bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
                        USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
                        sizeof(struct ustorm_def_status_block)/4);
        bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
                        CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
                        sizeof(struct cstorm_def_status_block)/4);
        bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
                        XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
                        sizeof(struct xstorm_def_status_block)/4);
}

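/* Set up the default status block: record the attention signal groups,
 * program the attention message address, then hook each storm's section
 * of the block to its host address with all HC indices disabled.
 */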
static void bnx2x_init_def_sb(struct bnx2x *bp,
                              struct host_def_status_block *def_sb,
                              dma_addr_t mapping, int sb_id)
{
        int port = BP_PORT(bp);
        int func = BP_FUNC(bp);
        int index, val, reg_offset;
        u64 section;

        /* ATTN */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            atten_status_block);
        def_sb->atten_status_block.status_block_id = sb_id;

        bp->attn_state = 0;

        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

        for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
                bp->attn_group[index].sig[0] = REG_RD(bp,
                                                     reg_offset + 0x10*index);
                bp->attn_group[index].sig[1] = REG_RD(bp,
                                               reg_offset + 0x4 + 0x10*index);
                bp->attn_group[index].sig[2] = REG_RD(bp,
                                               reg_offset + 0x8 + 0x10*index);
                bp->attn_group[index].sig[3] = REG_RD(bp,
                                               reg_offset + 0xc + 0x10*index);
        }

        reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
                             HC_REG_ATTN_MSG0_ADDR_L);

        REG_WR(bp, reg_offset, U64_LO(section));
        REG_WR(bp, reg_offset + 4, U64_HI(section));

        reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

        val = REG_RD(bp, reg_offset);
        val |= sb_id;
        REG_WR(bp, reg_offset, val);

        /* USTORM */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            u_def_status_block);
        def_sb->u_def_status_block.status_block_id = sb_id;

        REG_WR(bp, BAR_USTRORM_INTMEM +
               USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
        REG_WR(bp, BAR_USTRORM_INTMEM +
               ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
               U64_HI(section));
        REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
                USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

        for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_USTRORM_INTMEM +
                         USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

        /* CSTORM */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            c_def_status_block);
        def_sb->c_def_status_block.status_block_id = sb_id;

        REG_WR(bp, BAR_CSTRORM_INTMEM +
               CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
        REG_WR(bp, BAR_CSTRORM_INTMEM +
               ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
               U64_HI(section));
        REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
                CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

        for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_CSTRORM_INTMEM +
                         CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

        /* TSTORM */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            t_def_status_block);
        def_sb->t_def_status_block.status_block_id = sb_id;

        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
        REG_WR(bp, BAR_TSTRORM_INTMEM +
               ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
               U64_HI(section));
        REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
                TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

        for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_TSTRORM_INTMEM +
                         TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

        /* XSTORM */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            x_def_status_block);
        def_sb->x_def_status_block.status_block_id = sb_id;

        REG_WR(bp, BAR_XSTRORM_INTMEM +
               XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
        REG_WR(bp, BAR_XSTRORM_INTMEM +
               ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
               U64_HI(section));
        REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
                XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

        for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_XSTRORM_INTMEM +
                         XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

        bp->stats_pending = 0;
        bp->set_mac_pending = 0;

        bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

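/* Program the host coalescing timeouts for each queue's Rx and Tx CQ
 * indices; the HC timeout fields appear to count in 12-usec units, hence
 * the divide by 12, and a zero timeout disables coalescing on that index.
 */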
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        int i;

        for_each_queue(bp, i) {
                int sb_id = bp->fp[i].sb_id;

                /* HC_INDEX_U_ETH_RX_CQ_CONS */
                REG_WR8(bp, BAR_USTRORM_INTMEM +
                        USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
                                                    U_SB_ETH_RX_CQ_INDEX),
                        bp->rx_ticks/12);
                REG_WR16(bp, BAR_USTRORM_INTMEM +
                         USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
                                                     U_SB_ETH_RX_CQ_INDEX),
                         (bp->rx_ticks/12) ? 0 : 1);

                /* HC_INDEX_C_ETH_TX_CQ_CONS */
                REG_WR8(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
                                                    C_SB_ETH_TX_CQ_INDEX),
                        bp->tx_ticks/12);
                REG_WR16(bp, BAR_CSTRORM_INTMEM +
                         CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
                                                     C_SB_ETH_TX_CQ_INDEX),
                         (bp->tx_ticks/12) ? 0 : 1);
        }
}

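/* Release the skbs pre-allocated for TPA aggregation bins 0..last-1,
 * unmapping the DMA buffer first for bins still in the START state.
 */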
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
                                       struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++) {
                struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
                struct sk_buff *skb = rx_buf->skb;

                if (skb == NULL) {
                        DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
                        continue;
                }

                if (fp->tpa_state[i] == BNX2X_TPA_START)
                        pci_unmap_single(bp->pdev,
                                         pci_unmap_addr(rx_buf, mapping),
                                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

                dev_kfree_skb(skb);
                rx_buf->skb = NULL;
        }
}

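/* Build the Rx rings for every queue: pre-allocate the TPA pool when TPA
 * is enabled, chain the "next page" entries of the SGE, BD and CQE rings,
 * fill the rings with buffers, publish the initial producers to the chip,
 * and program the USTORM memory-workaround address for queue 0.
 */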
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);
        int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
                                              ETH_MAX_AGGREGATION_QUEUES_E1H;
        u16 ring_prod, cqe_ring_prod;
        int i, j;

        bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
        DP(NETIF_MSG_IFUP,
           "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

        if (bp->flags & TPA_ENABLE_FLAG) {

                for_each_rx_queue(bp, j) {
                        struct bnx2x_fastpath *fp = &bp->fp[j];

                        for (i = 0; i < max_agg_queues; i++) {
                                fp->tpa_pool[i].skb =
                                   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
                                if (!fp->tpa_pool[i].skb) {
                                        BNX2X_ERR("Failed to allocate TPA "
                                                  "skb pool for queue[%d] - "
                                                  "disabling TPA on this "
                                                  "queue!\n", j);
                                        bnx2x_free_tpa_pool(bp, fp, i);
                                        fp->disable_tpa = 1;
                                        break;
                                }
                                pci_unmap_addr_set((struct sw_rx_bd *)
                                                        &bp->fp->tpa_pool[i],
                                                   mapping, 0);
                                fp->tpa_state[i] = BNX2X_TPA_STOP;
                        }
                }
        }

        for_each_rx_queue(bp, j) {
                struct bnx2x_fastpath *fp = &bp->fp[j];

                fp->rx_bd_cons = 0;
                fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
                fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

                /* "next page" elements initialization */
                /* SGE ring */
                for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                        struct eth_rx_sge *sge;

                        sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
                        sge->addr_hi =
                                cpu_to_le32(U64_HI(fp->rx_sge_mapping +
                                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
                        sge->addr_lo =
                                cpu_to_le32(U64_LO(fp->rx_sge_mapping +
                                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
                }

                bnx2x_init_sge_ring_bit_mask(fp);

                /* RX BD ring */
                for (i = 1; i <= NUM_RX_RINGS; i++) {
                        struct eth_rx_bd *rx_bd;

                        rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
                        rx_bd->addr_hi =
                                cpu_to_le32(U64_HI(fp->rx_desc_mapping +
                                            BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
                        rx_bd->addr_lo =
                                cpu_to_le32(U64_LO(fp->rx_desc_mapping +
                                            BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
                }

                /* CQ ring */
                for (i = 1; i <= NUM_RCQ_RINGS; i++) {
                        struct eth_rx_cqe_next_page *nextpg;

                        nextpg = (struct eth_rx_cqe_next_page *)
                                &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
                        nextpg->addr_hi =
                                cpu_to_le32(U64_HI(fp->rx_comp_mapping +
                                           BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
                        nextpg->addr_lo =
                                cpu_to_le32(U64_LO(fp->rx_comp_mapping +
                                           BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
                }

                /* Allocate SGEs and initialize the ring elements */
                for (i = 0, ring_prod = 0;
                     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

                        if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
                                BNX2X_ERR("was only able to allocate "
                                          "%d rx sges\n", i);
                                BNX2X_ERR("disabling TPA for queue[%d]\n", j);
                                /* Cleanup already allocated elements */
                                bnx2x_free_rx_sge_range(bp, fp, ring_prod);
                                bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
                                fp->disable_tpa = 1;
                                ring_prod = 0;
                                break;
                        }
                        ring_prod = NEXT_SGE_IDX(ring_prod);
                }
                fp->rx_sge_prod = ring_prod;

                /* Allocate BDs and initialize BD ring */
                fp->rx_comp_cons = 0;
                cqe_ring_prod = ring_prod = 0;
                for (i = 0; i < bp->rx_ring_size; i++) {
                        if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
                                BNX2X_ERR("was only able to allocate "
                                          "%d rx skbs on queue[%d]\n", i, j);
                                fp->eth_q_stats.rx_skb_alloc_failed++;
                                break;
                        }
                        ring_prod = NEXT_RX_IDX(ring_prod);
                        cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
                        WARN_ON(ring_prod <= i);
                }

                fp->rx_bd_prod = ring_prod;
                /* must not have more available CQEs than BDs */
                fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
                                       cqe_ring_prod);
                fp->rx_pkt = fp->rx_calls = 0;

                /* Warning!
                 * this will generate an interrupt (to the TSTORM)
                 * must only be done after chip is initialized
                 */
                bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
                                     fp->rx_sge_prod);
                if (j != 0)
                        continue;

                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
                       U64_LO(fp->rx_comp_mapping));
                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
                       U64_HI(fp->rx_comp_mapping));
        }
}

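/* Chain the "next page" entries of each Tx BD ring and reset the queue's
 * producer/consumer indices.
 */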
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
        int i, j;

        for_each_tx_queue(bp, j) {
                struct bnx2x_fastpath *fp = &bp->fp[j];

                for (i = 1; i <= NUM_TX_RINGS; i++) {
                        struct eth_tx_bd *tx_bd =
                                &fp->tx_desc_ring[TX_DESC_CNT * i - 1];

                        tx_bd->addr_hi =
                                cpu_to_le32(U64_HI(fp->tx_desc_mapping +
                                            BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
                        tx_bd->addr_lo =
                                cpu_to_le32(U64_LO(fp->tx_desc_mapping +
                                            BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
                }

                fp->tx_pkt_prod = 0;
                fp->tx_pkt_cons = 0;
                fp->tx_bd_prod = 0;
                fp->tx_bd_cons = 0;
                fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
                fp->tx_pkt = 0;
        }
}

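/* Initialize the slowpath (SPQ) ring and tell the XSTORM where it lives
 * and where its producer starts.
 */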
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);

        spin_lock_init(&bp->spq_lock);

        bp->spq_left = MAX_SPQ_PENDING;
        bp->spq_prod_idx = 0;
        bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
        bp->spq_prod_bd = bp->spq;
        bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

        REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
               U64_LO(bp->spq_mapping));
        REG_WR(bp,
               XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
               U64_HI(bp->spq_mapping));

        REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
               bp->spq_prod_idx);
}

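/* Fill the per-connection ETH context for every queue: Rx parameters in
 * the USTORM section (BD/SGE page bases, buffer sizes, TPA flags), Tx BD
 * page base and doorbell data in the XSTORM section, and the Tx CQ index
 * in the CSTORM section.
 */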
static void bnx2x_init_context(struct bnx2x *bp)
{
        int i;

        for_each_queue(bp, i) {
                struct eth_context *context = bnx2x_sp(bp, context[i].eth);
                struct bnx2x_fastpath *fp = &bp->fp[i];
                u8 cl_id = fp->cl_id;
                u8 sb_id = fp->sb_id;

                context->ustorm_st_context.common.sb_index_numbers =
                                                BNX2X_RX_SB_INDEX_NUM;
                context->ustorm_st_context.common.clientId = cl_id;
                context->ustorm_st_context.common.status_block_id = sb_id;
                context->ustorm_st_context.common.flags =
                        (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
                context->ustorm_st_context.common.statistics_counter_id =
                                                cl_id;
                context->ustorm_st_context.common.mc_alignment_log_size =
                                                BNX2X_RX_ALIGN_SHIFT;
                context->ustorm_st_context.common.bd_buff_size =
                                                bp->rx_buf_size;
                context->ustorm_st_context.common.bd_page_base_hi =
                                                U64_HI(fp->rx_desc_mapping);
                context->ustorm_st_context.common.bd_page_base_lo =
                                                U64_LO(fp->rx_desc_mapping);
                if (!fp->disable_tpa) {
                        context->ustorm_st_context.common.flags |=
                                (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
                        context->ustorm_st_context.common.sge_buff_size =
                                (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
                                         (u32)0xffff);
                        context->ustorm_st_context.common.sge_page_base_hi =
                                                U64_HI(fp->rx_sge_mapping);
                        context->ustorm_st_context.common.sge_page_base_lo =
                                                U64_LO(fp->rx_sge_mapping);
                }

                context->ustorm_ag_context.cdu_usage =
                        CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
                                               CDU_REGION_NUMBER_UCM_AG,
                                               ETH_CONNECTION_TYPE);

                context->xstorm_st_context.tx_bd_page_base_hi =
                                                U64_HI(fp->tx_desc_mapping);
                context->xstorm_st_context.tx_bd_page_base_lo =
                                                U64_LO(fp->tx_desc_mapping);
                context->xstorm_st_context.db_data_addr_hi =
                                                U64_HI(fp->tx_prods_mapping);
                context->xstorm_st_context.db_data_addr_lo =
                                                U64_LO(fp->tx_prods_mapping);
                context->xstorm_st_context.statistics_data = (cl_id |
                                XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
                context->cstorm_st_context.sb_index_number =
                                                C_SB_ETH_TX_CQ_INDEX;
                context->cstorm_st_context.status_block_id = sb_id;

                context->xstorm_ag_context.cdu_reserved =
                        CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
                                               CDU_REGION_NUMBER_XCM_AG,
                                               ETH_CONNECTION_TYPE);
        }
}

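/* Program the RSS indirection table, spreading the entries round-robin
 * over the Rx queues' client ids; nothing to do when RSS is disabled.
 */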
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);
        int i;

        if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
                return;

        DP(NETIF_MSG_IFUP,
           "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
        for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
                REG_WR8(bp, BAR_TSTRORM_INTMEM +
                        TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
                        bp->fp->cl_id + (i % bp->num_rx_queues));
}

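/* Push the per-client TSTORM configuration (MTU, VLAN stripping, SGE
 * limits for TPA) for every queue.
 */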
static void bnx2x_set_client_config(struct bnx2x *bp)
{
        struct tstorm_eth_client_config tstorm_client = {0};
        int port = BP_PORT(bp);
        int i;

        tstorm_client.mtu = bp->dev->mtu;
        tstorm_client.config_flags =
                                (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
                                 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
        if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
                tstorm_client.config_flags |=
                                TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
                DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
        }
#endif

        if (bp->flags & TPA_ENABLE_FLAG) {
                tstorm_client.max_sges_for_packet =
                        SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
                tstorm_client.max_sges_for_packet =
                        ((tstorm_client.max_sges_for_packet +
                          PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
                        PAGES_PER_SGE_SHIFT;

                tstorm_client.config_flags |=
                                TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
        }

        for_each_queue(bp, i) {
                tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
                       ((u32 *)&tstorm_client)[0]);
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
                       ((u32 *)&tstorm_client)[1]);
        }

        DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
           ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

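/* Translate the driver rx_mode into the TSTORM MAC filter configuration
 * (drop-all, normal, all-multi or promiscuous) and rewrite the client
 * config unless Rx is being turned off entirely.
 */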
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
        struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
        int mode = bp->rx_mode;
        int mask = (1 << BP_L_ID(bp));
        int func = BP_FUNC(bp);
        int i;

        DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

        switch (mode) {
        case BNX2X_RX_MODE_NONE: /* no Rx */
                tstorm_mac_filter.ucast_drop_all = mask;
                tstorm_mac_filter.mcast_drop_all = mask;
                tstorm_mac_filter.bcast_drop_all = mask;
                break;

        case BNX2X_RX_MODE_NORMAL:
                tstorm_mac_filter.bcast_accept_all = mask;
                break;

        case BNX2X_RX_MODE_ALLMULTI:
                tstorm_mac_filter.mcast_accept_all = mask;
                tstorm_mac_filter.bcast_accept_all = mask;
                break;

        case BNX2X_RX_MODE_PROMISC:
                tstorm_mac_filter.ucast_accept_all = mask;
                tstorm_mac_filter.mcast_accept_all = mask;
                tstorm_mac_filter.bcast_accept_all = mask;
                break;

        default:
                BNX2X_ERR("BAD rx mode (%d)\n", mode);
                break;
        }

        for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
                       ((u32 *)&tstorm_mac_filter)[i]);

/*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
                   ((u32 *)&tstorm_mac_filter)[i]); */
        }

        if (mode != BNX2X_RX_MODE_NONE)
                bnx2x_set_client_config(bp);
}

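/* Chip-common internal memory init: advertise TPA existence to the
 * TSTORM and zero the USTORM aggregation data that the init tool leaves
 * uninitialized.
 */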
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
        int i;

        if (bp->flags & TPA_ENABLE_FLAG) {
                struct tstorm_eth_tpa_exist tpa = {0};

                tpa.tpa_exist = 1;

                REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
                       ((u32 *)&tpa)[0]);
                REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
                       ((u32 *)&tpa)[1]);
        }

        /* Zero this manually as its initialization is
           currently missing in the initTool */
        for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
        int port = BP_PORT(bp);

        REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
        REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
        REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
        REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
        int all_zero = 1;
        int port = BP_PORT(bp);
        int vn;

        bp->vn_weight_sum = 0;
        for (vn = VN_0; vn < E1HVN_MAX; vn++) {
                int func = 2*vn + port;
                u32 vn_cfg =
                        SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
                u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                                   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

                /* Skip hidden vns */
                if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
                        continue;

                /* If min rate is zero - set it to 1 */
                if (!vn_min_rate)
                        vn_min_rate = DEF_MIN_RATE;
                else
                        all_zero = 0;

                bp->vn_weight_sum += vn_min_rate;
        }

        /* ... only if all min rates are zeros - disable fairness */
        if (all_zero)
                bp->vn_weight_sum = 0;
}

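/* Per-function internal memory init: RSS configuration, initial rx mode,
 * per-client statistics reset, and the statistics collection flags and
 * query addresses for all four storms.
 */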
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
        struct tstorm_eth_function_common_config tstorm_config = {0};
        struct stats_indication_flags stats_flags = {0};
        int port = BP_PORT(bp);
        int func = BP_FUNC(bp);
        int i, j;
        u32 offset;
        u16 max_agg_size;

        if (is_multi(bp)) {
                tstorm_config.config_flags = MULTI_FLAGS(bp);
                tstorm_config.rss_result_mask = MULTI_MASK;
        }
        if (IS_E1HMF(bp))
                tstorm_config.config_flags |=
                                TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

        tstorm_config.leading_client_id = BP_L_ID(bp);

        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
               (*(u32 *)&tstorm_config));

        bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
        bnx2x_set_storm_rx_mode(bp);

        for_each_queue(bp, i) {
                u8 cl_id = bp->fp[i].cl_id;

                /* reset xstorm per client statistics */
                offset = BAR_XSTRORM_INTMEM +
                         XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
                for (j = 0;
                     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
                        REG_WR(bp, offset + j*4, 0);

                /* reset tstorm per client statistics */
                offset = BAR_TSTRORM_INTMEM +
                         TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
                for (j = 0;
                     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
                        REG_WR(bp, offset + j*4, 0);

                /* reset ustorm per client statistics */
                offset = BAR_USTRORM_INTMEM +
                         USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
                for (j = 0;
                     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
                        REG_WR(bp, offset + j*4, 0);
        }

        /* Init statistics related context */
        stats_flags.collect_eth = 1;

        REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
               ((u32 *)&stats_flags)[0]);
        REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
               ((u32 *)&stats_flags)[1]);

        REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
               ((u32 *)&stats_flags)[0]);
        REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
               ((u32 *)&stats_flags)[1]);

        REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
               ((u32 *)&stats_flags)[0]);
        REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
               ((u32 *)&stats_flags)[1]);

        REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
               ((u32 *)&stats_flags)[0]);
        REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
               ((u32 *)&stats_flags)[1]);

        REG_WR(bp, BAR_XSTRORM_INTMEM +
               XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
               U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
        REG_WR(bp, BAR_XSTRORM_INTMEM +
               XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
               U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
               U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
               U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

        REG_WR(bp, BAR_USTRORM_INTMEM +
               USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
               U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
        REG_WR(bp, BAR_USTRORM_INTMEM +
               USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
               U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5033
34f80b04
EG
5034 if (CHIP_IS_E1H(bp)) {
5035 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5036 IS_E1HMF(bp));
5037 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5038 IS_E1HMF(bp));
5039 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5040 IS_E1HMF(bp));
5041 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5042 IS_E1HMF(bp));
5043
7a9b2557
VZ
5044 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5045 bp->e1hov);
34f80b04
EG
5046 }
5047
4f40f2cb
EG
5048 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5049 max_agg_size =
5050 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5051 SGE_PAGE_SIZE * PAGES_PER_SGE),
5052 (u32)0xffff);
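	/* Illustrative arithmetic, assuming 4KB SGE pages and
	 * PAGES_PER_SGE == 2: min(8, MAX_SKB_FRAGS) * 4096 * 2 = 64KB,
	 * which the final min() against 0xffff clamps to 65535 - the
	 * largest value the u16 max_agg_size (and the FW field behind
	 * it) can carry.
	 */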
555f6c78 5053 for_each_rx_queue(bp, i) {
7a9b2557 5054 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5055
5056 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5057 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5058 U64_LO(fp->rx_comp_mapping));
5059 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5060 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5061 U64_HI(fp->rx_comp_mapping));
5062
7a9b2557 5063 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5064 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5065 max_agg_size);
5066 }
8a1c38d1 5067
1c06328c
EG
5068 /* dropless flow control */
5069 if (CHIP_IS_E1H(bp)) {
5070 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5071
5072 rx_pause.bd_thr_low = 250;
5073 rx_pause.cqe_thr_low = 250;
5074 rx_pause.cos = 1;
5075 rx_pause.sge_thr_low = 0;
5076 rx_pause.bd_thr_high = 350;
5077 rx_pause.cqe_thr_high = 350;
5078 rx_pause.sge_thr_high = 0;
5079
5080 for_each_rx_queue(bp, i) {
5081 struct bnx2x_fastpath *fp = &bp->fp[i];
5082
5083 if (!fp->disable_tpa) {
5084 rx_pause.sge_thr_low = 150;
5085 rx_pause.sge_thr_high = 250;
5086 }
5087
5088
5089 offset = BAR_USTRORM_INTMEM +
5090 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5091 fp->cl_id);
5092 for (j = 0;
5093 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5094 j++)
5095 REG_WR(bp, offset + j*4,
5096 ((u32 *)&rx_pause)[j]);
5097 }
5098 }
5099
8a1c38d1
EG
5100 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5101
5102 /* Init rate shaping and fairness contexts */
5103 if (IS_E1HMF(bp)) {
5104 int vn;
5105
 5106 		/* During init there is no active link;
 5107 		   until link is up, set link rate to 10Gbps */
5108 bp->link_vars.line_speed = SPEED_10000;
5109 bnx2x_init_port_minmax(bp);
5110
5111 bnx2x_calc_vn_weight_sum(bp);
5112
5113 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5114 bnx2x_init_vn_minmax(bp, 2*vn + port);
5115
5116 /* Enable rate shaping and fairness */
5117 bp->cmng.flags.cmng_enables =
5118 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5119 if (bp->vn_weight_sum)
5120 bp->cmng.flags.cmng_enables |=
5121 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5122 else
 5123 			DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
5124 " fairness will be disabled\n");
5125 } else {
5126 /* rate shaping and fairness are disabled */
5127 DP(NETIF_MSG_IFUP,
5128 "single function mode minmax will be disabled\n");
5129 }
5130
5131
5132 /* Store it to internal memory */
5133 if (bp->port.pmf)
5134 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5135 REG_WR(bp, BAR_XSTRORM_INTMEM +
5136 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5137 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5138}
5139
471de716
EG
5140static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5141{
5142 switch (load_code) {
5143 case FW_MSG_CODE_DRV_LOAD_COMMON:
5144 bnx2x_init_internal_common(bp);
5145 /* no break */
5146
5147 case FW_MSG_CODE_DRV_LOAD_PORT:
5148 bnx2x_init_internal_port(bp);
5149 /* no break */
5150
5151 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5152 bnx2x_init_internal_func(bp);
5153 break;
5154
5155 default:
5156 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5157 break;
5158 }
5159}
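/* The missing breaks above are deliberate: a COMMON load falls through
 * and also runs the PORT and FUNCTION stages, and a PORT load also runs
 * the FUNCTION stage - the same cascade bnx2x_init_hw() uses for the
 * HW init further below.
 */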
5160
5161static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5162{
5163 int i;
5164
5165 for_each_queue(bp, i) {
5166 struct bnx2x_fastpath *fp = &bp->fp[i];
5167
34f80b04 5168 fp->bp = bp;
a2fbb9ea 5169 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5170 fp->index = i;
34f80b04
EG
5171 fp->cl_id = BP_L_ID(bp) + i;
5172 fp->sb_id = fp->cl_id;
5173 DP(NETIF_MSG_IFUP,
f5372251
EG
5174 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5175 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5176 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5177 fp->sb_id);
5c862848 5178 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5179 }
5180
16119785
EG
5181 /* ensure status block indices were read */
5182 rmb();
5183
5184
5c862848
EG
5185 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5186 DEF_SB_ID);
5187 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5188 bnx2x_update_coalesce(bp);
5189 bnx2x_init_rx_rings(bp);
5190 bnx2x_init_tx_ring(bp);
5191 bnx2x_init_sp_ring(bp);
5192 bnx2x_init_context(bp);
471de716 5193 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5194 bnx2x_init_ind_table(bp);
0ef00459
EG
5195 bnx2x_stats_init(bp);
5196
5197 /* At this point, we are ready for interrupts */
5198 atomic_set(&bp->intr_sem, 0);
5199
5200 /* flush all before enabling interrupts */
5201 mb();
5202 mmiowb();
5203
615f8fd9 5204 bnx2x_int_enable(bp);
eb8da205
EG
5205
5206 /* Check for SPIO5 */
5207 bnx2x_attn_int_deasserted0(bp,
5208 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5209 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
5210}
5211
5212/* end of nic init */
5213
5214/*
5215 * gzip service functions
5216 */
5217
5218static int bnx2x_gunzip_init(struct bnx2x *bp)
5219{
5220 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5221 &bp->gunzip_mapping);
5222 if (bp->gunzip_buf == NULL)
5223 goto gunzip_nomem1;
5224
5225 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5226 if (bp->strm == NULL)
5227 goto gunzip_nomem2;
5228
5229 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5230 GFP_KERNEL);
5231 if (bp->strm->workspace == NULL)
5232 goto gunzip_nomem3;
5233
5234 return 0;
5235
5236gunzip_nomem3:
5237 kfree(bp->strm);
5238 bp->strm = NULL;
5239
5240gunzip_nomem2:
5241 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5242 bp->gunzip_mapping);
5243 bp->gunzip_buf = NULL;
5244
5245gunzip_nomem1:
5246 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5247 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
5248 return -ENOMEM;
5249}
5250
5251static void bnx2x_gunzip_end(struct bnx2x *bp)
5252{
5253 kfree(bp->strm->workspace);
5254
5255 kfree(bp->strm);
5256 bp->strm = NULL;
5257
5258 if (bp->gunzip_buf) {
5259 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5260 bp->gunzip_mapping);
5261 bp->gunzip_buf = NULL;
5262 }
5263}
5264
94a78b79 5265static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
5266{
5267 int n, rc;
5268
5269 /* check gzip header */
94a78b79
VZ
5270 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5271 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 5272 return -EINVAL;
94a78b79 5273 }
a2fbb9ea
ET
5274
5275 n = 10;
5276
34f80b04 5277#define FNAME 0x8
a2fbb9ea
ET
5278
5279 if (zbuf[3] & FNAME)
5280 while ((zbuf[n++] != 0) && (n < len));
5281
94a78b79 5282 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
5283 bp->strm->avail_in = len - n;
5284 bp->strm->next_out = bp->gunzip_buf;
5285 bp->strm->avail_out = FW_BUF_SIZE;
5286
5287 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5288 if (rc != Z_OK)
5289 return rc;
5290
5291 rc = zlib_inflate(bp->strm, Z_FINISH);
5292 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5293 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5294 bp->dev->name, bp->strm->msg);
5295
5296 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5297 if (bp->gunzip_outlen & 0x3)
5298 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5299 " gunzip_outlen (%d) not aligned\n",
5300 bp->dev->name, bp->gunzip_outlen);
5301 bp->gunzip_outlen >>= 2;
5302
5303 zlib_inflateEnd(bp->strm);
5304
5305 if (rc == Z_STREAM_END)
5306 return 0;
5307
5308 return rc;
5309}
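/* The header handling above follows RFC 1952: bytes 0x1f 0x8b identify
 * a gzip stream, byte 2 must be Z_DEFLATED (8), and when the FNAME flag
 * (bit 3 of the flags byte) is set, the NUL-terminated file name that
 * follows the fixed 10-byte header is skipped. Passing -MAX_WBITS to
 * zlib_inflateInit2() then inflates the payload as raw deflate data,
 * i.e. without expecting a zlib wrapper.
 */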
5310
5311/* nic load/unload */
5312
5313/*
34f80b04 5314 * General service functions
a2fbb9ea
ET
5315 */
5316
5317/* send a NIG loopback debug packet */
5318static void bnx2x_lb_pckt(struct bnx2x *bp)
5319{
a2fbb9ea 5320 u32 wb_write[3];
a2fbb9ea
ET
5321
5322 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5323 wb_write[0] = 0x55555555;
5324 wb_write[1] = 0x55555555;
34f80b04 5325 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5326 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5327
5328 /* NON-IP protocol */
a2fbb9ea
ET
5329 wb_write[0] = 0x09000000;
5330 wb_write[1] = 0x55555555;
34f80b04 5331 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5332 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5333}
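/* Each REG_WR_DMAE above carries 8 payload bytes plus a control word
 * (0x20 = SOP, 0x10 = EOP), so one call injects a 16-byte (0x10) frame;
 * that is the packet size the memory self test below polls for in
 * NIG_REG_STAT2_BRB_OCTET.
 */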
5334
5335/* some of the internal memories
 5336  * are not directly readable from the driver;
 5337  * to test them we send debug packets
5338 */
5339static int bnx2x_int_mem_test(struct bnx2x *bp)
5340{
5341 int factor;
5342 int count, i;
5343 u32 val = 0;
5344
ad8d3948 5345 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5346 factor = 120;
ad8d3948
EG
5347 else if (CHIP_REV_IS_EMUL(bp))
5348 factor = 200;
5349 else
a2fbb9ea 5350 factor = 1;
a2fbb9ea
ET
5351
5352 DP(NETIF_MSG_HW, "start part1\n");
5353
5354 /* Disable inputs of parser neighbor blocks */
5355 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5356 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5357 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5358 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5359
5360 /* Write 0 to parser credits for CFC search request */
5361 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5362
5363 /* send Ethernet packet */
5364 bnx2x_lb_pckt(bp);
5365
 5366 	/* TODO: should we reset the NIG statistic? */
5367 /* Wait until NIG register shows 1 packet of size 0x10 */
5368 count = 1000 * factor;
5369 while (count) {
34f80b04 5370
a2fbb9ea
ET
5371 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5372 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5373 if (val == 0x10)
5374 break;
5375
5376 msleep(10);
5377 count--;
5378 }
5379 if (val != 0x10) {
5380 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5381 return -1;
5382 }
5383
5384 /* Wait until PRS register shows 1 packet */
5385 count = 1000 * factor;
5386 while (count) {
5387 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5388 if (val == 1)
5389 break;
5390
5391 msleep(10);
5392 count--;
5393 }
5394 if (val != 0x1) {
5395 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5396 return -2;
5397 }
5398
5399 /* Reset and init BRB, PRS */
34f80b04 5400 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5401 msleep(50);
34f80b04 5402 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5403 msleep(50);
94a78b79
VZ
5404 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5405 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5406
5407 DP(NETIF_MSG_HW, "part2\n");
5408
5409 /* Disable inputs of parser neighbor blocks */
5410 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5411 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5412 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5413 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5414
5415 /* Write 0 to parser credits for CFC search request */
5416 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5417
5418 /* send 10 Ethernet packets */
5419 for (i = 0; i < 10; i++)
5420 bnx2x_lb_pckt(bp);
5421
5422 /* Wait until NIG register shows 10 + 1
5423 packets of size 11*0x10 = 0xb0 */
5424 count = 1000 * factor;
5425 while (count) {
34f80b04 5426
a2fbb9ea
ET
5427 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5428 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5429 if (val == 0xb0)
5430 break;
5431
5432 msleep(10);
5433 count--;
5434 }
5435 if (val != 0xb0) {
5436 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5437 return -3;
5438 }
5439
5440 /* Wait until PRS register shows 2 packets */
5441 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5442 if (val != 2)
5443 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5444
5445 /* Write 1 to parser credits for CFC search request */
5446 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5447
5448 /* Wait until PRS register shows 3 packets */
5449 msleep(10 * factor);
5450 /* Wait until NIG register shows 1 packet of size 0x10 */
5451 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5452 if (val != 3)
5453 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5454
5455 /* clear NIG EOP FIFO */
5456 for (i = 0; i < 11; i++)
5457 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5458 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5459 if (val != 1) {
5460 BNX2X_ERR("clear of NIG failed\n");
5461 return -4;
5462 }
5463
5464 /* Reset and init BRB, PRS, NIG */
5465 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5466 msleep(50);
5467 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5468 msleep(50);
94a78b79
VZ
5469 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5470 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5471#ifndef BCM_ISCSI
5472 /* set NIC mode */
5473 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5474#endif
5475
5476 /* Enable inputs of parser neighbor blocks */
5477 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5478 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5479 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5480 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5481
5482 DP(NETIF_MSG_HW, "done\n");
5483
5484 return 0; /* OK */
5485}
5486
5487static void enable_blocks_attention(struct bnx2x *bp)
5488{
5489 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5490 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5491 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5492 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5493 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5494 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5495 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5496 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5497 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5498/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5499/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5500 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5501 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5502 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5503/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5504/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5505 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5506 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5507 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5508 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5509/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5510/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5511 if (CHIP_REV_IS_FPGA(bp))
5512 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5513 else
5514 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5515 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5516 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5517 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5518/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5519/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5520 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5521 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5522/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5523 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5524}
5525
34f80b04 5526
81f75bbf
EG
5527static void bnx2x_reset_common(struct bnx2x *bp)
5528{
5529 /* reset_common */
5530 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5531 0xd3ffff7f);
5532 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5533}
5534
fd4ef40d
EG
5535
5536static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5537{
5538 u32 val;
5539 u8 port;
5540 u8 is_required = 0;
5541
5542 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5543 SHARED_HW_CFG_FAN_FAILURE_MASK;
5544
5545 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5546 is_required = 1;
5547
5548 /*
5549 * The fan failure mechanism is usually related to the PHY type since
5550 * the power consumption of the board is affected by the PHY. Currently,
 5551 	 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5552 */
5553 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5554 for (port = PORT_0; port < PORT_MAX; port++) {
5555 u32 phy_type =
5556 SHMEM_RD(bp, dev_info.port_hw_config[port].
5557 external_phy_config) &
5558 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5559 is_required |=
5560 ((phy_type ==
5561 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5562 (phy_type ==
5563 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5564 }
5565
5566 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5567
5568 if (is_required == 0)
5569 return;
5570
5571 /* Fan failure is indicated by SPIO 5 */
5572 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5573 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5574
5575 /* set to active low mode */
5576 val = REG_RD(bp, MISC_REG_SPIO_INT);
5577 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5578 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5579 REG_WR(bp, MISC_REG_SPIO_INT, val);
5580
5581 /* enable interrupt to signal the IGU */
5582 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5583 val |= (1 << MISC_REGISTERS_SPIO_5);
5584 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5585}
5586
34f80b04 5587static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5588{
a2fbb9ea 5589 u32 val, i;
a2fbb9ea 5590
34f80b04 5591 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5592
81f75bbf 5593 bnx2x_reset_common(bp);
34f80b04
EG
5594 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5595 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5596
94a78b79 5597 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
5598 if (CHIP_IS_E1H(bp))
5599 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5600
34f80b04
EG
5601 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5602 msleep(30);
5603 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5604
94a78b79 5605 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
5606 if (CHIP_IS_E1(bp)) {
5607 /* enable HW interrupt from PXP on USDM overflow
5608 bit 16 on INT_MASK_0 */
5609 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5610 }
a2fbb9ea 5611
94a78b79 5612 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 5613 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5614
5615#ifdef __BIG_ENDIAN
34f80b04
EG
5616 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5617 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5618 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5619 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5620 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
5621 /* make sure this value is 0 */
5622 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
5623
5624/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5625 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5626 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5627 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5628 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5629#endif
5630
34f80b04 5631 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5632#ifdef BCM_ISCSI
34f80b04
EG
5633 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5634 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5635 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5636#endif
5637
34f80b04
EG
5638 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5639 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5640
34f80b04
EG
 5641 	/* let the HW do its magic ... */
5642 msleep(100);
5643 /* finish PXP init */
5644 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5645 if (val != 1) {
5646 BNX2X_ERR("PXP2 CFG failed\n");
5647 return -EBUSY;
5648 }
5649 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5650 if (val != 1) {
5651 BNX2X_ERR("PXP2 RD_INIT failed\n");
5652 return -EBUSY;
5653 }
a2fbb9ea 5654
34f80b04
EG
5655 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5656 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5657
94a78b79 5658 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 5659
34f80b04
EG
5660 /* clean the DMAE memory */
5661 bp->dmae_ready = 1;
5662 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5663
94a78b79
VZ
5664 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5665 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5666 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5667 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 5668
34f80b04
EG
5669 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5670 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5671 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5672 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5673
94a78b79 5674 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
34f80b04
EG
5675 /* soft reset pulse */
5676 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5677 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5678
5679#ifdef BCM_ISCSI
94a78b79 5680 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 5681#endif
a2fbb9ea 5682
94a78b79 5683 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
5684 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5685 if (!CHIP_REV_IS_SLOW(bp)) {
5686 /* enable hw interrupt from doorbell Q */
5687 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5688 }
a2fbb9ea 5689
94a78b79
VZ
5690 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5691 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 5692 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
5693 /* set NIC mode */
5694 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
5695 if (CHIP_IS_E1H(bp))
5696 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5697
94a78b79
VZ
5698 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5699 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5700 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5701 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 5702
490c3c9b
EG
5703 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5704 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5705 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5706 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 5707
94a78b79
VZ
5708 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5709 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5710 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5711 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 5712
34f80b04
EG
5713 /* sync semi rtc */
5714 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5715 0x80000000);
5716 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5717 0x80000000);
a2fbb9ea 5718
94a78b79
VZ
5719 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5720 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5721 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 5722
34f80b04
EG
5723 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5724 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5725 REG_WR(bp, i, 0xc0cac01a);
5726 /* TODO: replace with something meaningful */
5727 }
94a78b79 5728 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
34f80b04 5729 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5730
34f80b04
EG
5731 if (sizeof(union cdu_context) != 1024)
5732 /* we currently assume that a context is 1024 bytes */
5733 printk(KERN_ALERT PFX "please adjust the size of"
5734 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5735
94a78b79 5736 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
5737 val = (4 << 24) + (0 << 12) + 1024;
5738 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5739 if (CHIP_IS_E1(bp)) {
 5740 		/* !!! fix pxp client credit until excel update */
5741 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5742 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5743 }
a2fbb9ea 5744
94a78b79 5745 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 5746 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5747 /* enable context validation interrupt from CFC */
5748 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5749
5750 /* set the thresholds to prevent CFC/CDU race */
5751 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5752
94a78b79
VZ
5753 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5754 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 5755
34f80b04 5756 /* PXPCS COMMON comes here */
94a78b79 5757 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
5758 /* Reset PCIE errors for debug */
5759 REG_WR(bp, 0x2814, 0xffffffff);
5760 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5761
34f80b04 5762 /* EMAC0 COMMON comes here */
94a78b79 5763 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
34f80b04 5764 /* EMAC1 COMMON comes here */
94a78b79 5765 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
34f80b04 5766 /* DBU COMMON comes here */
94a78b79 5767 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
34f80b04 5768 /* DBG COMMON comes here */
94a78b79 5769 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 5770
94a78b79 5771 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
5772 if (CHIP_IS_E1H(bp)) {
5773 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5774 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5775 }
5776
5777 if (CHIP_REV_IS_SLOW(bp))
5778 msleep(200);
5779
5780 /* finish CFC init */
5781 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5782 if (val != 1) {
5783 BNX2X_ERR("CFC LL_INIT failed\n");
5784 return -EBUSY;
5785 }
5786 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5787 if (val != 1) {
5788 BNX2X_ERR("CFC AC_INIT failed\n");
5789 return -EBUSY;
5790 }
5791 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5792 if (val != 1) {
5793 BNX2X_ERR("CFC CAM_INIT failed\n");
5794 return -EBUSY;
5795 }
5796 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5797
34f80b04
EG
5798 /* read NIG statistic
 5799 	   to see if this is our first bring-up since power-up */
5800 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5801 val = *bnx2x_sp(bp, wb_data[0]);
5802
5803 /* do internal memory self test */
5804 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5805 BNX2X_ERR("internal mem self test failed\n");
5806 return -EBUSY;
5807 }
5808
35b19ba5 5809 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
5810 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5811 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5812 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5813 bp->port.need_hw_lock = 1;
5814 break;
5815
34f80b04
EG
5816 default:
5817 break;
5818 }
f1410647 5819
fd4ef40d
EG
5820 bnx2x_setup_fan_failure_detection(bp);
5821
34f80b04
EG
5822 /* clear PXP2 attentions */
5823 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5824
34f80b04 5825 enable_blocks_attention(bp);
a2fbb9ea 5826
6bbca910
YR
5827 if (!BP_NOMCP(bp)) {
5828 bnx2x_acquire_phy_lock(bp);
5829 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5830 bnx2x_release_phy_lock(bp);
5831 } else
5832 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5833
34f80b04
EG
5834 return 0;
5835}
a2fbb9ea 5836
34f80b04
EG
5837static int bnx2x_init_port(struct bnx2x *bp)
5838{
5839 int port = BP_PORT(bp);
94a78b79 5840 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 5841 u32 low, high;
34f80b04 5842 u32 val;
a2fbb9ea 5843
34f80b04
EG
5844 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5845
5846 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5847
5848 /* Port PXP comes here */
94a78b79 5849 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
a2fbb9ea 5850 /* Port PXP2 comes here */
94a78b79 5851 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
a2fbb9ea
ET
5852#ifdef BCM_ISCSI
5853 /* Port0 1
5854 * Port1 385 */
5855 i++;
5856 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5857 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5858 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5859 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5860
5861 /* Port0 2
5862 * Port1 386 */
5863 i++;
5864 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5865 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5866 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5867 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5868
5869 /* Port0 3
5870 * Port1 387 */
5871 i++;
5872 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5873 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5874 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5875 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5876#endif
34f80b04 5877 /* Port CMs come here */
94a78b79 5878 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea
ET
5879
5880 /* Port QM comes here */
a2fbb9ea
ET
5881#ifdef BCM_ISCSI
5882 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5883 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5884
94a78b79 5885 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
a2fbb9ea
ET
5886#endif
5887 /* Port DQ comes here */
94a78b79 5888 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 5889
94a78b79 5890 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
5891 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5892 /* no pause for emulation and FPGA */
5893 low = 0;
5894 high = 513;
5895 } else {
5896 if (IS_E1HMF(bp))
5897 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5898 else if (bp->dev->mtu > 4096) {
5899 if (bp->flags & ONE_PORT_FLAG)
5900 low = 160;
5901 else {
5902 val = bp->dev->mtu;
5903 /* (24*1024 + val*4)/256 */
5904 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5905 }
5906 } else
5907 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5908 high = low + 56; /* 14*1024/256 */
5909 }
5910 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5911 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
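	/* Illustrative arithmetic for the single-port jumbo case: with
	 * mtu = 9000, low = 96 + 9000/64 + 1 = 237 (the rounded-up
	 * (24*1024 + mtu*4)/256 from the comment above) and
	 * high = 237 + 56 = 293, both presumably in 256-byte BRB blocks.
	 */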
5912
5913
ad8d3948 5914 /* Port PRS comes here */
94a78b79 5915 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
a2fbb9ea 5916 /* Port TSDM comes here */
94a78b79 5917 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
a2fbb9ea 5918 /* Port CSDM comes here */
94a78b79 5919 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
a2fbb9ea 5920 /* Port USDM comes here */
94a78b79 5921 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
a2fbb9ea 5922 /* Port XSDM comes here */
94a78b79 5923 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 5924
94a78b79
VZ
5925 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5926 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5927 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5928 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 5929
a2fbb9ea 5930 /* Port UPB comes here */
94a78b79 5931 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
34f80b04 5932 /* Port XPB comes here */
94a78b79 5933 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 5934
94a78b79 5935 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
5936
5937 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5938 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5939
5940 /* update threshold */
34f80b04 5941 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5942 /* update init credit */
34f80b04 5943 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
5944
5945 /* probe changes */
34f80b04 5946 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5947 msleep(5);
34f80b04 5948 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5949
5950#ifdef BCM_ISCSI
5951 /* tell the searcher where the T2 table is */
5952 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5953
5954 wb_write[0] = U64_LO(bp->t2_mapping);
5955 wb_write[1] = U64_HI(bp->t2_mapping);
5956 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5957 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5958 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5959 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5960
5961 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5962 /* Port SRCH comes here */
5963#endif
5964 /* Port CDU comes here */
94a78b79 5965 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
a2fbb9ea 5966 /* Port CFC comes here */
94a78b79 5967 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
5968
5969 if (CHIP_IS_E1(bp)) {
5970 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5971 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5972 }
94a78b79 5973 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 5974
94a78b79 5975 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
5976 /* init aeu_mask_attn_func_0/1:
5977 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5978 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5979 * bits 4-7 are used for "per vn group attention" */
5980 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5981 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5982
a2fbb9ea 5983 /* Port PXPCS comes here */
94a78b79 5984 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
a2fbb9ea 5985 /* Port EMAC0 comes here */
94a78b79 5986 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
a2fbb9ea 5987 /* Port EMAC1 comes here */
94a78b79 5988 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
a2fbb9ea 5989 /* Port DBU comes here */
94a78b79 5990 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
a2fbb9ea 5991 /* Port DBG comes here */
94a78b79 5992 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 5993
94a78b79 5994 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
5995
5996 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5997
5998 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
5999 /* 0x2 disable e1hov, 0x1 enable */
6000 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6001 (IS_E1HMF(bp) ? 0x1 : 0x2));
6002
1c06328c
EG
6003 /* support pause requests from USDM, TSDM and BRB */
6004 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6005
6006 {
6007 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6008 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6009 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6010 }
34f80b04
EG
6011 }
6012
a2fbb9ea 6013 /* Port MCP comes here */
94a78b79 6014 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
a2fbb9ea 6015 /* Port DMAE comes here */
94a78b79 6016 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6017
35b19ba5 6018 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6019 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6020 {
6021 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6022
6023 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6024 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6025
6026 /* The GPIO should be swapped if the swap register is
6027 set and active */
6028 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6029 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6030
6031 /* Select function upon port-swap configuration */
6032 if (port == 0) {
6033 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6034 aeu_gpio_mask = (swap_val && swap_override) ?
6035 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6036 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6037 } else {
6038 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6039 aeu_gpio_mask = (swap_val && swap_override) ?
6040 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6041 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6042 }
6043 val = REG_RD(bp, offset);
6044 /* add GPIO3 to group */
6045 val |= aeu_gpio_mask;
6046 REG_WR(bp, offset, val);
6047 }
6048 break;
6049
35b19ba5 6050 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
f1410647
ET
6051 /* add SPIO 5 to group 0 */
6052 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6053 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6054 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6055 break;
6056
6057 default:
6058 break;
6059 }
6060
c18487ee 6061 bnx2x__link_reset(bp);
a2fbb9ea 6062
34f80b04
EG
6063 return 0;
6064}
6065
6066#define ILT_PER_FUNC (768/2)
6067#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
 6068/* the phys address is shifted right 12 bits and a 1=valid bit is
 6069   added at the 53rd bit;
 6070   then, since this is a wide register(TM),
 6071   we split it into two 32-bit writes
6072 */
6073#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6074#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6075#define PXP_ONE_ILT(x) (((x) << 10) | x)
6076#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
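/* Worked example: for addr = 0x0000001234567000,
 * ONCHIP_ADDR1(addr) = 0x01234567 (low 32 bits of the page number) and
 * ONCHIP_ADDR2(addr) = 0x00100000 (valid bit 20 set, upper page bits 0).
 * Likewise PXP_ONE_ILT(5) = (5 << 10) | 5 = 0x1405, i.e. a range whose
 * first and last entries are both 5.
 */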
6077
6078#define CNIC_ILT_LINES 0
6079
6080static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6081{
6082 int reg;
6083
6084 if (CHIP_IS_E1H(bp))
6085 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6086 else /* E1 */
6087 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6088
6089 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6090}
6091
6092static int bnx2x_init_func(struct bnx2x *bp)
6093{
6094 int port = BP_PORT(bp);
6095 int func = BP_FUNC(bp);
8badd27a 6096 u32 addr, val;
34f80b04
EG
6097 int i;
6098
6099 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6100
8badd27a
EG
6101 /* set MSI reconfigure capability */
6102 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6103 val = REG_RD(bp, addr);
6104 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6105 REG_WR(bp, addr, val);
6106
34f80b04
EG
6107 i = FUNC_ILT_BASE(func);
6108
6109 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6110 if (CHIP_IS_E1H(bp)) {
6111 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6112 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6113 } else /* E1 */
6114 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6115 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6116
6117
6118 if (CHIP_IS_E1H(bp)) {
6119 for (i = 0; i < 9; i++)
6120 bnx2x_init_block(bp,
94a78b79 6121 cm_blocks[i], FUNC0_STAGE + func);
34f80b04
EG
6122
6123 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6124 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6125 }
6126
6127 /* HC init per function */
6128 if (CHIP_IS_E1H(bp)) {
6129 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6130
6131 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6132 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6133 }
94a78b79 6134 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 6135
c14423fe 6136 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6137 REG_WR(bp, 0x2114, 0xffffffff);
6138 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6139
34f80b04
EG
6140 return 0;
6141}
6142
6143static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6144{
6145 int i, rc = 0;
a2fbb9ea 6146
34f80b04
EG
6147 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6148 BP_FUNC(bp), load_code);
a2fbb9ea 6149
34f80b04
EG
6150 bp->dmae_ready = 0;
6151 mutex_init(&bp->dmae_mutex);
6152 bnx2x_gunzip_init(bp);
a2fbb9ea 6153
34f80b04
EG
6154 switch (load_code) {
6155 case FW_MSG_CODE_DRV_LOAD_COMMON:
6156 rc = bnx2x_init_common(bp);
6157 if (rc)
6158 goto init_hw_err;
6159 /* no break */
6160
6161 case FW_MSG_CODE_DRV_LOAD_PORT:
6162 bp->dmae_ready = 1;
6163 rc = bnx2x_init_port(bp);
6164 if (rc)
6165 goto init_hw_err;
6166 /* no break */
6167
6168 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6169 bp->dmae_ready = 1;
6170 rc = bnx2x_init_func(bp);
6171 if (rc)
6172 goto init_hw_err;
6173 break;
6174
6175 default:
6176 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6177 break;
6178 }
6179
6180 if (!BP_NOMCP(bp)) {
6181 int func = BP_FUNC(bp);
a2fbb9ea
ET
6182
6183 bp->fw_drv_pulse_wr_seq =
34f80b04 6184 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6185 DRV_PULSE_SEQ_MASK);
34f80b04
EG
6186 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6187 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6188 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6189 } else
6190 bp->func_stx = 0;
a2fbb9ea 6191
34f80b04
EG
6192 /* this needs to be done before gunzip end */
6193 bnx2x_zero_def_sb(bp);
6194 for_each_queue(bp, i)
6195 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6196
6197init_hw_err:
6198 bnx2x_gunzip_end(bp);
6199
6200 return rc;
a2fbb9ea
ET
6201}
6202
c14423fe 6203/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
6204static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6205{
34f80b04 6206 int func = BP_FUNC(bp);
f1410647
ET
6207 u32 seq = ++bp->fw_seq;
6208 u32 rc = 0;
19680c48
EG
6209 u32 cnt = 1;
6210 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6211
34f80b04 6212 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6213 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6214
19680c48
EG
6215 do {
 6216 		/* let the FW do its magic ... */
6217 msleep(delay);
a2fbb9ea 6218
19680c48 6219 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6220
19680c48
EG
 6221 	/* Give the FW up to 2 seconds (200*10ms) */
6222 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6223
6224 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6225 cnt*delay, rc, seq);
a2fbb9ea
ET
6226
6227 /* is this a reply to our command? */
6228 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6229 rc &= FW_MSG_CODE_MASK;
f1410647 6230
a2fbb9ea
ET
6231 } else {
6232 /* FW BUG! */
6233 BNX2X_ERR("FW failed to respond!\n");
6234 bnx2x_fw_dump(bp);
6235 rc = 0;
6236 }
f1410647 6237
a2fbb9ea
ET
6238 return rc;
6239}
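/* The handshake above keys off the sequence number: the driver writes
 * (command | seq) to drv_mb_header and the MCP echoes seq in the low
 * bits of fw_mb_header, so comparing against FW_MSG_SEQ_NUMBER_MASK
 * distinguishes a fresh reply from a stale one before FW_MSG_CODE_MASK
 * extracts the response code.
 */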
6240
6241static void bnx2x_free_mem(struct bnx2x *bp)
6242{
6243
6244#define BNX2X_PCI_FREE(x, y, size) \
6245 do { \
6246 if (x) { \
6247 pci_free_consistent(bp->pdev, size, x, y); \
6248 x = NULL; \
6249 y = 0; \
6250 } \
6251 } while (0)
6252
6253#define BNX2X_FREE(x) \
6254 do { \
6255 if (x) { \
6256 vfree(x); \
6257 x = NULL; \
6258 } \
6259 } while (0)
6260
6261 int i;
6262
6263 /* fastpath */
555f6c78 6264 /* Common */
a2fbb9ea
ET
6265 for_each_queue(bp, i) {
6266
555f6c78 6267 /* status blocks */
a2fbb9ea
ET
6268 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6269 bnx2x_fp(bp, i, status_blk_mapping),
6270 sizeof(struct host_status_block) +
6271 sizeof(struct eth_tx_db_data));
555f6c78
EG
6272 }
6273 /* Rx */
6274 for_each_rx_queue(bp, i) {
a2fbb9ea 6275
555f6c78 6276 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6277 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6278 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6279 bnx2x_fp(bp, i, rx_desc_mapping),
6280 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6281
6282 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6283 bnx2x_fp(bp, i, rx_comp_mapping),
6284 sizeof(struct eth_fast_path_rx_cqe) *
6285 NUM_RCQ_BD);
a2fbb9ea 6286
7a9b2557 6287 /* SGE ring */
32626230 6288 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6289 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6290 bnx2x_fp(bp, i, rx_sge_mapping),
6291 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6292 }
555f6c78
EG
6293 /* Tx */
6294 for_each_tx_queue(bp, i) {
6295
6296 /* fastpath tx rings: tx_buf tx_desc */
6297 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6298 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6299 bnx2x_fp(bp, i, tx_desc_mapping),
6300 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6301 }
a2fbb9ea
ET
6302 /* end of fastpath */
6303
6304 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6305 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6306
6307 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6308 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
6309
6310#ifdef BCM_ISCSI
6311 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6312 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6313 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6314 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6315#endif
7a9b2557 6316 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6317
6318#undef BNX2X_PCI_FREE
 6319#undef BNX2X_FREE
6320}
6321
6322static int bnx2x_alloc_mem(struct bnx2x *bp)
6323{
6324
6325#define BNX2X_PCI_ALLOC(x, y, size) \
6326 do { \
6327 x = pci_alloc_consistent(bp->pdev, size, y); \
6328 if (x == NULL) \
6329 goto alloc_mem_err; \
6330 memset(x, 0, size); \
6331 } while (0)
6332
6333#define BNX2X_ALLOC(x, size) \
6334 do { \
6335 x = vmalloc(size); \
6336 if (x == NULL) \
6337 goto alloc_mem_err; \
6338 memset(x, 0, size); \
6339 } while (0)
6340
6341 int i;
6342
6343 /* fastpath */
555f6c78 6344 /* Common */
a2fbb9ea
ET
6345 for_each_queue(bp, i) {
6346 bnx2x_fp(bp, i, bp) = bp;
6347
555f6c78 6348 /* status blocks */
a2fbb9ea
ET
6349 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6350 &bnx2x_fp(bp, i, status_blk_mapping),
6351 sizeof(struct host_status_block) +
6352 sizeof(struct eth_tx_db_data));
555f6c78
EG
6353 }
6354 /* Rx */
6355 for_each_rx_queue(bp, i) {
a2fbb9ea 6356
555f6c78 6357 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6358 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6359 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6360 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6361 &bnx2x_fp(bp, i, rx_desc_mapping),
6362 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6363
6364 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6365 &bnx2x_fp(bp, i, rx_comp_mapping),
6366 sizeof(struct eth_fast_path_rx_cqe) *
6367 NUM_RCQ_BD);
6368
7a9b2557
VZ
6369 /* SGE ring */
6370 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6371 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6372 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6373 &bnx2x_fp(bp, i, rx_sge_mapping),
6374 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6375 }
555f6c78
EG
6376 /* Tx */
6377 for_each_tx_queue(bp, i) {
6378
6379 bnx2x_fp(bp, i, hw_tx_prods) =
6380 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6381
6382 bnx2x_fp(bp, i, tx_prods_mapping) =
6383 bnx2x_fp(bp, i, status_blk_mapping) +
6384 sizeof(struct host_status_block);
6385
6386 /* fastpath tx rings: tx_buf tx_desc */
6387 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6388 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6389 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6390 &bnx2x_fp(bp, i, tx_desc_mapping),
6391 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6392 }
a2fbb9ea
ET
6393 /* end of fastpath */
6394
6395 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6396 sizeof(struct host_def_status_block));
6397
6398 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6399 sizeof(struct bnx2x_slowpath));
6400
6401#ifdef BCM_ISCSI
6402 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6403
6404 /* Initialize T1 */
6405 for (i = 0; i < 64*1024; i += 64) {
6406 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6407 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6408 }
6409
6410 /* allocate searcher T2 table
6411 we allocate 1/4 of alloc num for T2
6412 (which is not entered into the ILT) */
6413 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6414
6415 /* Initialize T2 */
6416 for (i = 0; i < 16*1024; i += 64)
6417 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6418
c14423fe 6419 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
6420 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
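	/* In other words, bytes 56..63 of each 64-byte T2 line hold the
	 * DMA address of the next line; the loop above leaves the last
	 * line pointing past the end of the table, so it is re-pointed
	 * at the first line here, presumably to give the searcher a
	 * circular free list.
	 */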
6421
6422 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6423 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6424
6425 /* QM queues (128*MAX_CONN) */
6426 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6427#endif
6428
6429 /* Slow path ring */
6430 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6431
6432 return 0;
6433
6434alloc_mem_err:
6435 bnx2x_free_mem(bp);
6436 return -ENOMEM;
6437
6438#undef BNX2X_PCI_ALLOC
6439#undef BNX2X_ALLOC
6440}
6441
6442static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6443{
6444 int i;
6445
555f6c78 6446 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6447 struct bnx2x_fastpath *fp = &bp->fp[i];
6448
6449 u16 bd_cons = fp->tx_bd_cons;
6450 u16 sw_prod = fp->tx_pkt_prod;
6451 u16 sw_cons = fp->tx_pkt_cons;
6452
a2fbb9ea
ET
6453 while (sw_cons != sw_prod) {
6454 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6455 sw_cons++;
6456 }
6457 }
6458}
6459
6460static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6461{
6462 int i, j;
6463
555f6c78 6464 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6465 struct bnx2x_fastpath *fp = &bp->fp[j];
6466
a2fbb9ea
ET
6467 for (i = 0; i < NUM_RX_BD; i++) {
6468 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6469 struct sk_buff *skb = rx_buf->skb;
6470
6471 if (skb == NULL)
6472 continue;
6473
6474 pci_unmap_single(bp->pdev,
6475 pci_unmap_addr(rx_buf, mapping),
356e2385 6476 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
6477
6478 rx_buf->skb = NULL;
6479 dev_kfree_skb(skb);
6480 }
7a9b2557 6481 if (!fp->disable_tpa)
32626230
EG
6482 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6483 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6484 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6485 }
6486}
6487
6488static void bnx2x_free_skbs(struct bnx2x *bp)
6489{
6490 bnx2x_free_tx_skbs(bp);
6491 bnx2x_free_rx_skbs(bp);
6492}
6493
6494static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6495{
34f80b04 6496 int i, offset = 1;
a2fbb9ea
ET
6497
6498 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6499 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6500 bp->msix_table[0].vector);
6501
6502 for_each_queue(bp, i) {
c14423fe 6503 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6504 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6505 bnx2x_fp(bp, i, state));
6506
34f80b04 6507 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6508 }
a2fbb9ea
ET
6509}
6510
6511static void bnx2x_free_irq(struct bnx2x *bp)
6512{
a2fbb9ea 6513 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6514 bnx2x_free_msix_irqs(bp);
6515 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6516 bp->flags &= ~USING_MSIX_FLAG;
6517
8badd27a
EG
6518 } else if (bp->flags & USING_MSI_FLAG) {
6519 free_irq(bp->pdev->irq, bp->dev);
6520 pci_disable_msi(bp->pdev);
6521 bp->flags &= ~USING_MSI_FLAG;
6522
a2fbb9ea
ET
6523 } else
6524 free_irq(bp->pdev->irq, bp->dev);
6525}
6526
6527static int bnx2x_enable_msix(struct bnx2x *bp)
6528{
8badd27a
EG
6529 int i, rc, offset = 1;
6530 int igu_vec = 0;
a2fbb9ea 6531
8badd27a
EG
6532 bp->msix_table[0].entry = igu_vec;
6533 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6534
34f80b04 6535 for_each_queue(bp, i) {
8badd27a 6536 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
6537 bp->msix_table[i + offset].entry = igu_vec;
6538 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6539 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6540 }
6541
34f80b04 6542 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6543 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6544 if (rc) {
8badd27a
EG
6545 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6546 return rc;
34f80b04 6547 }
8badd27a 6548
a2fbb9ea
ET
6549 bp->flags |= USING_MSIX_FLAG;
6550
6551 return 0;
a2fbb9ea
ET
6552}
6553
a2fbb9ea
ET
6554static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6555{
34f80b04 6556 int i, rc, offset = 1;
a2fbb9ea 6557
a2fbb9ea
ET
6558 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6559 bp->dev->name, bp->dev);
a2fbb9ea
ET
6560 if (rc) {
6561 BNX2X_ERR("request sp irq failed\n");
6562 return -EBUSY;
6563 }
6564
6565 for_each_queue(bp, i) {
555f6c78
EG
6566 struct bnx2x_fastpath *fp = &bp->fp[i];
6567
6568 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6569 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6570 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6571 if (rc) {
555f6c78 6572 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
6573 bnx2x_free_msix_irqs(bp);
6574 return -EBUSY;
6575 }
6576
555f6c78 6577 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6578 }
6579
555f6c78
EG
6580 i = BNX2X_NUM_QUEUES(bp);
6581 if (is_multi(bp))
6582 printk(KERN_INFO PFX
6583 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6584 bp->dev->name, bp->msix_table[0].vector,
6585 bp->msix_table[offset].vector,
6586 bp->msix_table[offset + i - 1].vector);
6587 else
6588 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6589 bp->dev->name, bp->msix_table[0].vector,
6590 bp->msix_table[offset + i - 1].vector);
6591
a2fbb9ea 6592 return 0;
a2fbb9ea
ET
6593}
6594
8badd27a
EG
6595static int bnx2x_enable_msi(struct bnx2x *bp)
6596{
6597 int rc;
6598
6599 rc = pci_enable_msi(bp->pdev);
6600 if (rc) {
6601 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6602 return -1;
6603 }
6604 bp->flags |= USING_MSI_FLAG;
6605
6606 return 0;
6607}
6608
a2fbb9ea
ET
6609static int bnx2x_req_irq(struct bnx2x *bp)
6610{
8badd27a 6611 unsigned long flags;
34f80b04 6612 int rc;
a2fbb9ea 6613
8badd27a
EG
6614 if (bp->flags & USING_MSI_FLAG)
6615 flags = 0;
6616 else
6617 flags = IRQF_SHARED;
6618
6619 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6620 bp->dev->name, bp->dev);
a2fbb9ea
ET
6621 if (!rc)
6622 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6623
6624 return rc;
a2fbb9ea
ET
6625}
6626
65abd74d
YG
6627static void bnx2x_napi_enable(struct bnx2x *bp)
6628{
6629 int i;
6630
555f6c78 6631 for_each_rx_queue(bp, i)
65abd74d
YG
6632 napi_enable(&bnx2x_fp(bp, i, napi));
6633}
6634
6635static void bnx2x_napi_disable(struct bnx2x *bp)
6636{
6637 int i;
6638
555f6c78 6639 for_each_rx_queue(bp, i)
65abd74d
YG
6640 napi_disable(&bnx2x_fp(bp, i, napi));
6641}
6642
6643static void bnx2x_netif_start(struct bnx2x *bp)
6644{
6645 if (atomic_dec_and_test(&bp->intr_sem)) {
6646 if (netif_running(bp->dev)) {
65abd74d
YG
6647 bnx2x_napi_enable(bp);
6648 bnx2x_int_enable(bp);
555f6c78
EG
6649 if (bp->state == BNX2X_STATE_OPEN)
6650 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
6651 }
6652 }
6653}
6654
f8ef6e44 6655static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6656{
f8ef6e44 6657 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6658 bnx2x_napi_disable(bp);
762d5f6c
EG
6659 netif_tx_disable(bp->dev);
6660 bp->dev->trans_start = jiffies; /* prevent tx timeout */
65abd74d
YG
6661}
6662
a2fbb9ea
ET
6663/*
6664 * Init service functions
6665 */
6666
static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

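/*
 * E1H variant: one CAM entry per function, tagged with the outer VLAN
 * (E1HOV) used in multi-function mode.
 */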
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

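/*
 * Wait (or poll the rx ring) until *state_p reaches the expected ramrod
 * completion state; gives up with -EBUSY after ~5000 iterations.
 */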
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

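/*
 * Choose the rx/tx queue count for the requested interrupt mode: one
 * queue for INTx/MSI, up to one per online CPU for MSI-X with RSS.
 */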
static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   " number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}

static void bnx2x_set_rx_mode(struct net_device *dev);

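/*
 * Bring the NIC up: allocate memory, request IRQs, negotiate the load
 * type with the MCP, init HW and FW, then open the client queues.
 */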
/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
			((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);


	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

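/* Halt a non-leading client and delete its CFC entry */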
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

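/*
 * Halt the leading connection and send PORT_DELETE; its completion is
 * observed on the default status block rather than a fastpath ring.
 */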
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

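/*
 * Tear-down mirror of bnx2x_nic_load(): drain tx, invalidate MACs,
 * close the connections, then reset at the scope the MCP grants.
 */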
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

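/*
 * The PXP2 "pretend" register lets this function issue GRC accesses as
 * another function; used below to silence interrupts armed by UNDI.
 */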
static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

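/*
 * If a pre-boot UNDI driver left the device configured, unload it on
 * both ports and reset the chip before this driver takes over.
 */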
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

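/*
 * Read the chip id, flash size, shmem base and bootcode version shared
 * by all functions; flags NO_MCP when shmem looks invalid.
 */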
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

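/*
 * Translate the NVRAM link_config into the requested speed, duplex and
 * flow control, constrained by the supported mask built above.
 */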
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

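/* Per-function HW info: MF (E1HOV) configuration and the MAC address */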
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

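/*
 * One-time driver state init: work items, module-parameter derived
 * settings (multi mode, TPA, MRRS) and the periodic timer.
 */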
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

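/* Report the actual link state when carrier is up, else the requested one */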
8297static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8298{
8299 struct bnx2x *bp = netdev_priv(dev);
8300
34f80b04
EG
8301 cmd->supported = bp->port.supported;
8302 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
8303
8304 if (netif_carrier_ok(dev)) {
c18487ee
YR
8305 cmd->speed = bp->link_vars.line_speed;
8306 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8307 } else {
c18487ee
YR
8308 cmd->speed = bp->link_params.req_line_speed;
8309 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8310 }
34f80b04
EG
8311 if (IS_E1HMF(bp)) {
8312 u16 vn_max_rate;
8313
8314 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8315 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8316 if (vn_max_rate < cmd->speed)
8317 cmd->speed = vn_max_rate;
8318 }
a2fbb9ea 8319
c18487ee
YR
8320 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8321 u32 ext_phy_type =
8322 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
8323
8324 switch (ext_phy_type) {
8325 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 8326 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8327 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
8328 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8329 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8330 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
f1410647
ET
8331 cmd->port = PORT_FIBRE;
8332 break;
8333
8334 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 8335 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
8336 cmd->port = PORT_TP;
8337 break;
8338
c18487ee
YR
8339 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8340 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8341 bp->link_params.ext_phy_config);
8342 break;
8343
f1410647
ET
8344 default:
8345 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
8346 bp->link_params.ext_phy_config);
8347 break;
f1410647
ET
8348 }
8349 } else
a2fbb9ea 8350 cmd->port = PORT_TP;
a2fbb9ea 8351
34f80b04 8352 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
8353 cmd->transceiver = XCVR_INTERNAL;
8354
c18487ee 8355 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8356 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8357 else
a2fbb9ea 8358 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
8359
8360 cmd->maxtxpkt = 0;
8361 cmd->maxrxpkt = 0;
8362
8363 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8364 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8365 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8366 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8367 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8368 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8369 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8370
8371 return 0;
8372}
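/* Note (editorial, not driver code): in E1H multi-function mode the
 * per-function maximum bandwidth lives in mf_config as a count of
 * 100 Mbps units, so the speed reported above is clipped to it.
 * Worked example: a MAX_BW field of 25 gives 25 * 100 = 2500, capping
 * a 10000 Mbps line rate at 2500 Mbps.
 */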
8373
8374static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8375{
8376 struct bnx2x *bp = netdev_priv(dev);
8377 u32 advertising;
8378
34f80b04
EG
8379 if (IS_E1HMF(bp))
8380 return 0;
8381
a2fbb9ea
ET
8382 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8383 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8384 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8385 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8386 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8387 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8388 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8389
a2fbb9ea 8390 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8391 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8392 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8393 return -EINVAL;
f1410647 8394 }
a2fbb9ea
ET
8395
8396 /* advertise the requested speed and duplex if supported */
34f80b04 8397 cmd->advertising &= bp->port.supported;
a2fbb9ea 8398
c18487ee
YR
8399 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8400 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8401 bp->port.advertising |= (ADVERTISED_Autoneg |
8402 cmd->advertising);
a2fbb9ea
ET
8403
8404 } else { /* forced speed */
8405 /* advertise the requested speed and duplex if supported */
8406 switch (cmd->speed) {
8407 case SPEED_10:
8408 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8409 if (!(bp->port.supported &
f1410647
ET
8410 SUPPORTED_10baseT_Full)) {
8411 DP(NETIF_MSG_LINK,
8412 "10M full not supported\n");
a2fbb9ea 8413 return -EINVAL;
f1410647 8414 }
a2fbb9ea
ET
8415
8416 advertising = (ADVERTISED_10baseT_Full |
8417 ADVERTISED_TP);
8418 } else {
34f80b04 8419 if (!(bp->port.supported &
f1410647
ET
8420 SUPPORTED_10baseT_Half)) {
8421 DP(NETIF_MSG_LINK,
8422 "10M half not supported\n");
a2fbb9ea 8423 return -EINVAL;
f1410647 8424 }
a2fbb9ea
ET
8425
8426 advertising = (ADVERTISED_10baseT_Half |
8427 ADVERTISED_TP);
8428 }
8429 break;
8430
8431 case SPEED_100:
8432 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8433 if (!(bp->port.supported &
f1410647
ET
8434 SUPPORTED_100baseT_Full)) {
8435 DP(NETIF_MSG_LINK,
8436 "100M full not supported\n");
a2fbb9ea 8437 return -EINVAL;
f1410647 8438 }
a2fbb9ea
ET
8439
8440 advertising = (ADVERTISED_100baseT_Full |
8441 ADVERTISED_TP);
8442 } else {
34f80b04 8443 if (!(bp->port.supported &
f1410647
ET
8444 SUPPORTED_100baseT_Half)) {
8445 DP(NETIF_MSG_LINK,
8446 "100M half not supported\n");
a2fbb9ea 8447 return -EINVAL;
f1410647 8448 }
a2fbb9ea
ET
8449
8450 advertising = (ADVERTISED_100baseT_Half |
8451 ADVERTISED_TP);
8452 }
8453 break;
8454
8455 case SPEED_1000:
f1410647
ET
8456 if (cmd->duplex != DUPLEX_FULL) {
8457 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8458 return -EINVAL;
f1410647 8459 }
a2fbb9ea 8460
34f80b04 8461 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8462 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8463 return -EINVAL;
f1410647 8464 }
a2fbb9ea
ET
8465
8466 advertising = (ADVERTISED_1000baseT_Full |
8467 ADVERTISED_TP);
8468 break;
8469
8470 case SPEED_2500:
f1410647
ET
8471 if (cmd->duplex != DUPLEX_FULL) {
8472 DP(NETIF_MSG_LINK,
8473 "2.5G half not supported\n");
a2fbb9ea 8474 return -EINVAL;
f1410647 8475 }
a2fbb9ea 8476
34f80b04 8477 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8478 DP(NETIF_MSG_LINK,
8479 "2.5G full not supported\n");
a2fbb9ea 8480 return -EINVAL;
f1410647 8481 }
a2fbb9ea 8482
f1410647 8483 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8484 ADVERTISED_TP);
8485 break;
8486
8487 case SPEED_10000:
f1410647
ET
8488 if (cmd->duplex != DUPLEX_FULL) {
8489 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8490 return -EINVAL;
f1410647 8491 }
a2fbb9ea 8492
34f80b04 8493 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8494 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8495 return -EINVAL;
f1410647 8496 }
a2fbb9ea
ET
8497
8498 advertising = (ADVERTISED_10000baseT_Full |
8499 ADVERTISED_FIBRE);
8500 break;
8501
8502 default:
f1410647 8503 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8504 return -EINVAL;
8505 }
8506
c18487ee
YR
8507 bp->link_params.req_line_speed = cmd->speed;
8508 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8509 bp->port.advertising = advertising;
a2fbb9ea
ET
8510 }
8511
c18487ee 8512 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8513 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8514 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8515 bp->port.advertising);
a2fbb9ea 8516
34f80b04 8517 if (netif_running(dev)) {
bb2a0f7a 8518 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8519 bnx2x_link_set(bp);
8520 }
a2fbb9ea
ET
8521
8522 return 0;
8523}
8524
c18487ee
YR
8525#define PHY_FW_VER_LEN 10
8526
a2fbb9ea
ET
8527static void bnx2x_get_drvinfo(struct net_device *dev,
8528 struct ethtool_drvinfo *info)
8529{
8530 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8531 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8532
8533 strcpy(info->driver, DRV_MODULE_NAME);
8534 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8535
8536 phy_fw_ver[0] = '\0';
34f80b04 8537 if (bp->port.pmf) {
4a37fb66 8538 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8539 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8540 (bp->state != BNX2X_STATE_CLOSED),
8541 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8542 bnx2x_release_phy_lock(bp);
34f80b04 8543 }
c18487ee 8544
f0e53a84
EG
8545 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8546 (bp->common.bc_ver & 0xff0000) >> 16,
8547 (bp->common.bc_ver & 0xff00) >> 8,
8548 (bp->common.bc_ver & 0xff),
8549 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8550 strcpy(info->bus_info, pci_name(bp->pdev));
8551 info->n_stats = BNX2X_NUM_STATS;
8552 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8553 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8554 info->regdump_len = 0;
8555}
8556
0a64ea57
EG
8557#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8558#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8559
8560static int bnx2x_get_regs_len(struct net_device *dev)
8561{
8562 static u32 regdump_len;
8563 struct bnx2x *bp = netdev_priv(dev);
8564 int i;
8565
8566 if (regdump_len)
8567 return regdump_len;
8568
8569 if (CHIP_IS_E1(bp)) {
8570 for (i = 0; i < REGS_COUNT; i++)
8571 if (IS_E1_ONLINE(reg_addrs[i].info))
8572 regdump_len += reg_addrs[i].size;
8573
8574 for (i = 0; i < WREGS_COUNT_E1; i++)
8575 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8576 regdump_len += wreg_addrs_e1[i].size *
8577 (1 + wreg_addrs_e1[i].read_regs_count);
8578
8579 } else { /* E1H */
8580 for (i = 0; i < REGS_COUNT; i++)
8581 if (IS_E1H_ONLINE(reg_addrs[i].info))
8582 regdump_len += reg_addrs[i].size;
8583
8584 for (i = 0; i < WREGS_COUNT_E1H; i++)
8585 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8586 regdump_len += wreg_addrs_e1h[i].size *
8587 (1 + wreg_addrs_e1h[i].read_regs_count);
8588 }
8589 regdump_len *= 4;
8590 regdump_len += sizeof(struct dump_hdr);
8591
8592 return regdump_len;
8593}
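/* Note (editorial, not driver code): the dump length is the sum of the
 * register-block sizes for the running chip, counted in dwords; each
 * wide-bus ("wreg") entry contributes size * (1 + read_regs_count)
 * dwords. The total is scaled by 4 to bytes and the dump header is
 * added. The result is cached in a function-static variable, so it is
 * computed once, for the first device queried.
 */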
8594
8595static void bnx2x_get_regs(struct net_device *dev,
8596 struct ethtool_regs *regs, void *_p)
8597{
8598 u32 *p = _p, i, j;
8599 struct bnx2x *bp = netdev_priv(dev);
8600 struct dump_hdr dump_hdr = {0};
8601
8602 regs->version = 0;
8603 memset(p, 0, regs->len);
8604
8605 if (!netif_running(bp->dev))
8606 return;
8607
8608 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8609 dump_hdr.dump_sign = dump_sign_all;
8610 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8611 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8612 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8613 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8614 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8615
8616 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8617 p += dump_hdr.hdr_size + 1;
8618
8619 if (CHIP_IS_E1(bp)) {
8620 for (i = 0; i < REGS_COUNT; i++)
8621 if (IS_E1_ONLINE(reg_addrs[i].info))
8622 for (j = 0; j < reg_addrs[i].size; j++)
8623 *p++ = REG_RD(bp,
8624 reg_addrs[i].addr + j*4);
8625
8626 } else { /* E1H */
8627 for (i = 0; i < REGS_COUNT; i++)
8628 if (IS_E1H_ONLINE(reg_addrs[i].info))
8629 for (j = 0; j < reg_addrs[i].size; j++)
8630 *p++ = REG_RD(bp,
8631 reg_addrs[i].addr + j*4);
8632 }
8633}
8634
a2fbb9ea
ET
8635static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8636{
8637 struct bnx2x *bp = netdev_priv(dev);
8638
8639 if (bp->flags & NO_WOL_FLAG) {
8640 wol->supported = 0;
8641 wol->wolopts = 0;
8642 } else {
8643 wol->supported = WAKE_MAGIC;
8644 if (bp->wol)
8645 wol->wolopts = WAKE_MAGIC;
8646 else
8647 wol->wolopts = 0;
8648 }
8649 memset(&wol->sopass, 0, sizeof(wol->sopass));
8650}
8651
8652static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8653{
8654 struct bnx2x *bp = netdev_priv(dev);
8655
8656 if (wol->wolopts & ~WAKE_MAGIC)
8657 return -EINVAL;
8658
8659 if (wol->wolopts & WAKE_MAGIC) {
8660 if (bp->flags & NO_WOL_FLAG)
8661 return -EINVAL;
8662
8663 bp->wol = 1;
34f80b04 8664 } else
a2fbb9ea 8665 bp->wol = 0;
34f80b04 8666
a2fbb9ea
ET
8667 return 0;
8668}
8669
8670static u32 bnx2x_get_msglevel(struct net_device *dev)
8671{
8672 struct bnx2x *bp = netdev_priv(dev);
8673
8674 return bp->msglevel;
8675}
8676
8677static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8678{
8679 struct bnx2x *bp = netdev_priv(dev);
8680
8681 if (capable(CAP_NET_ADMIN))
8682 bp->msglevel = level;
8683}
8684
8685static int bnx2x_nway_reset(struct net_device *dev)
8686{
8687 struct bnx2x *bp = netdev_priv(dev);
8688
34f80b04
EG
8689 if (!bp->port.pmf)
8690 return 0;
a2fbb9ea 8691
34f80b04 8692 if (netif_running(dev)) {
bb2a0f7a 8693 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8694 bnx2x_link_set(bp);
8695 }
a2fbb9ea
ET
8696
8697 return 0;
8698}
8699
01e53298
NO
8700static u32
8701bnx2x_get_link(struct net_device *dev)
8702{
8703 struct bnx2x *bp = netdev_priv(dev);
8704
8705 return bp->link_vars.link_up;
8706}
8707
a2fbb9ea
ET
8708static int bnx2x_get_eeprom_len(struct net_device *dev)
8709{
8710 struct bnx2x *bp = netdev_priv(dev);
8711
34f80b04 8712 return bp->common.flash_size;
a2fbb9ea
ET
8713}
8714
8715static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8716{
34f80b04 8717 int port = BP_PORT(bp);
a2fbb9ea
ET
8718 int count, i;
8719 u32 val = 0;
8720
8721 /* adjust timeout for emulation/FPGA */
8722 count = NVRAM_TIMEOUT_COUNT;
8723 if (CHIP_REV_IS_SLOW(bp))
8724 count *= 100;
8725
8726 /* request access to nvram interface */
8727 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8728 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8729
8730 for (i = 0; i < count*10; i++) {
8731 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8732 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8733 break;
8734
8735 udelay(5);
8736 }
8737
8738 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8739 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
8740 return -EBUSY;
8741 }
8742
8743 return 0;
8744}
8745
8746static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8747{
34f80b04 8748 int port = BP_PORT(bp);
a2fbb9ea
ET
8749 int count, i;
8750 u32 val = 0;
8751
8752 /* adjust timeout for emulation/FPGA */
8753 count = NVRAM_TIMEOUT_COUNT;
8754 if (CHIP_REV_IS_SLOW(bp))
8755 count *= 100;
8756
8757 /* relinquish nvram interface */
8758 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8759 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8760
8761 for (i = 0; i < count*10; i++) {
8762 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8763 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8764 break;
8765
8766 udelay(5);
8767 }
8768
8769 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8770 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
8771 return -EBUSY;
8772 }
8773
8774 return 0;
8775}
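/* Note (editorial, not driver code): both arbitration helpers above use
 * the same bounded busy-wait idiom - one register write to request (or
 * relinquish) the lock, then polling a status bit every 5 us, with the
 * timeout scaled up 100x for emulation/FPGA. A generic sketch, with a
 * hypothetical helper name:
 *
 *	static int example_poll_bit_set(struct bnx2x *bp, u32 reg, u32 bit,
 *					int count)
 *	{
 *		int i;
 *
 *		for (i = 0; i < count; i++) {
 *			if (REG_RD(bp, reg) & bit)
 *				return 0;
 *			udelay(5);
 *		}
 *		return -EBUSY;
 *	}
 */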
8776
8777static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8778{
8779 u32 val;
8780
8781 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8782
8783 /* enable both bits, even on read */
8784 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8785 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8786 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8787}
8788
8789static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8790{
8791 u32 val;
8792
8793 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8794
8795 /* disable both bits, even after read */
8796 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8797 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8798 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8799}
8800
4781bfad 8801static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
8802 u32 cmd_flags)
8803{
f1410647 8804 int count, i, rc;
a2fbb9ea
ET
8805 u32 val;
8806
8807 /* build the command word */
8808 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8809
8810 /* need to clear DONE bit separately */
8811 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8812
8813 /* address of the NVRAM to read from */
8814 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8815 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8816
8817 /* issue a read command */
8818 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8819
8820 /* adjust timeout for emulation/FPGA */
8821 count = NVRAM_TIMEOUT_COUNT;
8822 if (CHIP_REV_IS_SLOW(bp))
8823 count *= 100;
8824
8825 /* wait for completion */
8826 *ret_val = 0;
8827 rc = -EBUSY;
8828 for (i = 0; i < count; i++) {
8829 udelay(5);
8830 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8831
8832 if (val & MCPR_NVM_COMMAND_DONE) {
8833 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
 8834 /* we read nvram data in cpu order,
 8835 * but ethtool sees it as an array of bytes;
 8836 * converting to big-endian does the job */
4781bfad 8837 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
8838 rc = 0;
8839 break;
8840 }
8841 }
8842
8843 return rc;
8844}
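/* Note (editorial, not driver code): cpu_to_be32() above stores the most
 * significant byte of the dword first in memory, so the buffer handed
 * back to ethtool is the same byte sequence regardless of host
 * endianness.
 */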
8845
8846static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8847 int buf_size)
8848{
8849 int rc;
8850 u32 cmd_flags;
4781bfad 8851 __be32 val;
a2fbb9ea
ET
8852
8853 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8854 DP(BNX2X_MSG_NVM,
c14423fe 8855 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8856 offset, buf_size);
8857 return -EINVAL;
8858 }
8859
34f80b04
EG
8860 if (offset + buf_size > bp->common.flash_size) {
8861 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8862 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8863 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8864 return -EINVAL;
8865 }
8866
8867 /* request access to nvram interface */
8868 rc = bnx2x_acquire_nvram_lock(bp);
8869 if (rc)
8870 return rc;
8871
8872 /* enable access to nvram interface */
8873 bnx2x_enable_nvram_access(bp);
8874
8875 /* read the first word(s) */
8876 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8877 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8878 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8879 memcpy(ret_buf, &val, 4);
8880
8881 /* advance to the next dword */
8882 offset += sizeof(u32);
8883 ret_buf += sizeof(u32);
8884 buf_size -= sizeof(u32);
8885 cmd_flags = 0;
8886 }
8887
8888 if (rc == 0) {
8889 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8890 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8891 memcpy(ret_buf, &val, 4);
8892 }
8893
8894 /* disable access to nvram interface */
8895 bnx2x_disable_nvram_access(bp);
8896 bnx2x_release_nvram_lock(bp);
8897
8898 return rc;
8899}
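/* Note (editorial, not driver code): reads are framed with command flags -
 * the first dword of a burst carries MCPR_NVM_COMMAND_FIRST, the final
 * dword MCPR_NVM_COMMAND_LAST, and middle dwords none. E.g. a 12-byte
 * read issues three dword commands flagged FIRST, 0, LAST.
 */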
8900
8901static int bnx2x_get_eeprom(struct net_device *dev,
8902 struct ethtool_eeprom *eeprom, u8 *eebuf)
8903{
8904 struct bnx2x *bp = netdev_priv(dev);
8905 int rc;
8906
2add3acb
EG
8907 if (!netif_running(dev))
8908 return -EAGAIN;
8909
34f80b04 8910 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8911 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8912 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8913 eeprom->len, eeprom->len);
8914
8915 /* parameters already validated in ethtool_get_eeprom */
8916
8917 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8918
8919 return rc;
8920}
8921
8922static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8923 u32 cmd_flags)
8924{
f1410647 8925 int count, i, rc;
a2fbb9ea
ET
8926
8927 /* build the command word */
8928 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8929
8930 /* need to clear DONE bit separately */
8931 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8932
8933 /* write the data */
8934 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8935
8936 /* address of the NVRAM to write to */
8937 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8938 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8939
8940 /* issue the write command */
8941 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8942
8943 /* adjust timeout for emulation/FPGA */
8944 count = NVRAM_TIMEOUT_COUNT;
8945 if (CHIP_REV_IS_SLOW(bp))
8946 count *= 100;
8947
8948 /* wait for completion */
8949 rc = -EBUSY;
8950 for (i = 0; i < count; i++) {
8951 udelay(5);
8952 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8953 if (val & MCPR_NVM_COMMAND_DONE) {
8954 rc = 0;
8955 break;
8956 }
8957 }
8958
8959 return rc;
8960}
8961
f1410647 8962#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8963
8964static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8965 int buf_size)
8966{
8967 int rc;
8968 u32 cmd_flags;
8969 u32 align_offset;
4781bfad 8970 __be32 val;
a2fbb9ea 8971
34f80b04
EG
8972 if (offset + buf_size > bp->common.flash_size) {
8973 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8974 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8975 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8976 return -EINVAL;
8977 }
8978
8979 /* request access to nvram interface */
8980 rc = bnx2x_acquire_nvram_lock(bp);
8981 if (rc)
8982 return rc;
8983
8984 /* enable access to nvram interface */
8985 bnx2x_enable_nvram_access(bp);
8986
8987 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8988 align_offset = (offset & ~0x03);
8989 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8990
8991 if (rc == 0) {
8992 val &= ~(0xff << BYTE_OFFSET(offset));
8993 val |= (*data_buf << BYTE_OFFSET(offset));
8994
8995 /* nvram data is returned as an array of bytes
8996 * convert it back to cpu order */
8997 val = be32_to_cpu(val);
8998
a2fbb9ea
ET
8999 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9000 cmd_flags);
9001 }
9002
9003 /* disable access to nvram interface */
9004 bnx2x_disable_nvram_access(bp);
9005 bnx2x_release_nvram_lock(bp);
9006
9007 return rc;
9008}
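/* Note (editorial, not driver code): single-byte writes are done as a
 * read-modify-write of the containing dword. Worked example: for
 * offset 0x102, align_offset = 0x100 and BYTE_OFFSET(0x102) = 8 * 2 = 16,
 * so the 8 bits at position 23:16 of the dword image are cleared and
 * replaced with the new byte before the dword is written back.
 */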
9009
9010static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9011 int buf_size)
9012{
9013 int rc;
9014 u32 cmd_flags;
9015 u32 val;
9016 u32 written_so_far;
9017
34f80b04 9018 if (buf_size == 1) /* ethtool */
a2fbb9ea 9019 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
9020
9021 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9022 DP(BNX2X_MSG_NVM,
c14423fe 9023 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9024 offset, buf_size);
9025 return -EINVAL;
9026 }
9027
34f80b04
EG
9028 if (offset + buf_size > bp->common.flash_size) {
9029 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9030 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9031 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9032 return -EINVAL;
9033 }
9034
9035 /* request access to nvram interface */
9036 rc = bnx2x_acquire_nvram_lock(bp);
9037 if (rc)
9038 return rc;
9039
9040 /* enable access to nvram interface */
9041 bnx2x_enable_nvram_access(bp);
9042
9043 written_so_far = 0;
9044 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9045 while ((written_so_far < buf_size) && (rc == 0)) {
9046 if (written_so_far == (buf_size - sizeof(u32)))
9047 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9048 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9049 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9050 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9051 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9052
9053 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
9054
9055 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9056
9057 /* advance to the next dword */
9058 offset += sizeof(u32);
9059 data_buf += sizeof(u32);
9060 written_so_far += sizeof(u32);
9061 cmd_flags = 0;
9062 }
9063
9064 /* disable access to nvram interface */
9065 bnx2x_disable_nvram_access(bp);
9066 bnx2x_release_nvram_lock(bp);
9067
9068 return rc;
9069}
9070
9071static int bnx2x_set_eeprom(struct net_device *dev,
9072 struct ethtool_eeprom *eeprom, u8 *eebuf)
9073{
9074 struct bnx2x *bp = netdev_priv(dev);
9075 int rc;
9076
9f4c9583
EG
9077 if (!netif_running(dev))
9078 return -EAGAIN;
9079
34f80b04 9080 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9081 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9082 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9083 eeprom->len, eeprom->len);
9084
9085 /* parameters already validated in ethtool_set_eeprom */
9086
c18487ee 9087 /* If the magic number is PHY (0x00504859, ASCII "PHY"), upgrade the PHY FW */
34f80b04
EG
9088 if (eeprom->magic == 0x00504859)
9089 if (bp->port.pmf) {
9090
4a37fb66 9091 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
9092 rc = bnx2x_flash_download(bp, BP_PORT(bp),
9093 bp->link_params.ext_phy_config,
9094 (bp->state != BNX2X_STATE_CLOSED),
9095 eebuf, eeprom->len);
bb2a0f7a
YG
9096 if ((bp->state == BNX2X_STATE_OPEN) ||
9097 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04 9098 rc |= bnx2x_link_reset(&bp->link_params,
589abe3a 9099 &bp->link_vars, 1);
34f80b04
EG
9100 rc |= bnx2x_phy_init(&bp->link_params,
9101 &bp->link_vars);
bb2a0f7a 9102 }
4a37fb66 9103 bnx2x_release_phy_lock(bp);
34f80b04
EG
9104
9105 } else /* Only the PMF can access the PHY */
9106 return -EINVAL;
9107 else
c18487ee 9108 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
9109
9110 return rc;
9111}
9112
9113static int bnx2x_get_coalesce(struct net_device *dev,
9114 struct ethtool_coalesce *coal)
9115{
9116 struct bnx2x *bp = netdev_priv(dev);
9117
9118 memset(coal, 0, sizeof(struct ethtool_coalesce));
9119
9120 coal->rx_coalesce_usecs = bp->rx_ticks;
9121 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
9122
9123 return 0;
9124}
9125
9126static int bnx2x_set_coalesce(struct net_device *dev,
9127 struct ethtool_coalesce *coal)
9128{
9129 struct bnx2x *bp = netdev_priv(dev);
9130
9131 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
1e9d9987
EG
9132 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9133 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
a2fbb9ea
ET
9134
9135 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
1e9d9987
EG
9136 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9137 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
a2fbb9ea 9138
34f80b04 9139 if (netif_running(dev))
a2fbb9ea
ET
9140 bnx2x_update_coalesce(bp);
9141
9142 return 0;
9143}
9144
9145static void bnx2x_get_ringparam(struct net_device *dev,
9146 struct ethtool_ringparam *ering)
9147{
9148 struct bnx2x *bp = netdev_priv(dev);
9149
9150 ering->rx_max_pending = MAX_RX_AVAIL;
9151 ering->rx_mini_max_pending = 0;
9152 ering->rx_jumbo_max_pending = 0;
9153
9154 ering->rx_pending = bp->rx_ring_size;
9155 ering->rx_mini_pending = 0;
9156 ering->rx_jumbo_pending = 0;
9157
9158 ering->tx_max_pending = MAX_TX_AVAIL;
9159 ering->tx_pending = bp->tx_ring_size;
9160}
9161
9162static int bnx2x_set_ringparam(struct net_device *dev,
9163 struct ethtool_ringparam *ering)
9164{
9165 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9166 int rc = 0;
a2fbb9ea
ET
9167
9168 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9169 (ering->tx_pending > MAX_TX_AVAIL) ||
9170 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9171 return -EINVAL;
9172
9173 bp->rx_ring_size = ering->rx_pending;
9174 bp->tx_ring_size = ering->tx_pending;
9175
34f80b04
EG
9176 if (netif_running(dev)) {
9177 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9178 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
9179 }
9180
34f80b04 9181 return rc;
a2fbb9ea
ET
9182}
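/* Note (editorial, not driver code): tx_pending must exceed
 * MAX_SKB_FRAGS + 4 so that even a maximally-fragmented skb, plus a few
 * extra BDs per packet (the likely reason for the +4), still fits in the
 * ring. Changing either ring size on a running interface takes a full
 * unload/reload, as above.
 */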
9183
9184static void bnx2x_get_pauseparam(struct net_device *dev,
9185 struct ethtool_pauseparam *epause)
9186{
9187 struct bnx2x *bp = netdev_priv(dev);
9188
356e2385
EG
9189 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9190 BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
9191 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9192
c0700f90
DM
9193 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9194 BNX2X_FLOW_CTRL_RX);
9195 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9196 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
9197
9198 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9199 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9200 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9201}
9202
9203static int bnx2x_set_pauseparam(struct net_device *dev,
9204 struct ethtool_pauseparam *epause)
9205{
9206 struct bnx2x *bp = netdev_priv(dev);
9207
34f80b04
EG
9208 if (IS_E1HMF(bp))
9209 return 0;
9210
a2fbb9ea
ET
9211 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9212 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9213 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9214
c0700f90 9215 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9216
f1410647 9217 if (epause->rx_pause)
c0700f90 9218 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9219
f1410647 9220 if (epause->tx_pause)
c0700f90 9221 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9222
c0700f90
DM
9223 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9224 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9225
c18487ee 9226 if (epause->autoneg) {
34f80b04 9227 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9228 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
9229 return -EINVAL;
9230 }
a2fbb9ea 9231
c18487ee 9232 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9233 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9234 }
a2fbb9ea 9235
c18487ee
YR
9236 DP(NETIF_MSG_LINK,
9237 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
9238
9239 if (netif_running(dev)) {
bb2a0f7a 9240 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9241 bnx2x_link_set(bp);
9242 }
a2fbb9ea
ET
9243
9244 return 0;
9245}
9246
df0f2343
VZ
9247static int bnx2x_set_flags(struct net_device *dev, u32 data)
9248{
9249 struct bnx2x *bp = netdev_priv(dev);
9250 int changed = 0;
9251 int rc = 0;
9252
9253 /* TPA requires Rx CSUM offloading */
9254 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9255 if (!(dev->features & NETIF_F_LRO)) {
9256 dev->features |= NETIF_F_LRO;
9257 bp->flags |= TPA_ENABLE_FLAG;
9258 changed = 1;
9259 }
9260
9261 } else if (dev->features & NETIF_F_LRO) {
9262 dev->features &= ~NETIF_F_LRO;
9263 bp->flags &= ~TPA_ENABLE_FLAG;
9264 changed = 1;
9265 }
9266
9267 if (changed && netif_running(dev)) {
9268 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9269 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9270 }
9271
9272 return rc;
9273}
9274
a2fbb9ea
ET
9275static u32 bnx2x_get_rx_csum(struct net_device *dev)
9276{
9277 struct bnx2x *bp = netdev_priv(dev);
9278
9279 return bp->rx_csum;
9280}
9281
9282static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9283{
9284 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9285 int rc = 0;
a2fbb9ea
ET
9286
9287 bp->rx_csum = data;
df0f2343
VZ
9288
 9289 /* Disable TPA when Rx CSUM is disabled; otherwise all
 9290 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9291 if (!data) {
9292 u32 flags = ethtool_op_get_flags(dev);
9293
9294 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9295 }
9296
9297 return rc;
a2fbb9ea
ET
9298}
9299
9300static int bnx2x_set_tso(struct net_device *dev, u32 data)
9301{
755735eb 9302 if (data) {
a2fbb9ea 9303 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9304 dev->features |= NETIF_F_TSO6;
9305 } else {
a2fbb9ea 9306 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9307 dev->features &= ~NETIF_F_TSO6;
9308 }
9309
a2fbb9ea
ET
9310 return 0;
9311}
9312
f3c87cdd 9313static const struct {
a2fbb9ea
ET
9314 char string[ETH_GSTRING_LEN];
9315} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
9316 { "register_test (offline)" },
9317 { "memory_test (offline)" },
9318 { "loopback_test (offline)" },
9319 { "nvram_test (online)" },
9320 { "interrupt_test (online)" },
9321 { "link_test (online)" },
d3d4f495 9322 { "idle check (online)" }
a2fbb9ea
ET
9323};
9324
9325static int bnx2x_self_test_count(struct net_device *dev)
9326{
9327 return BNX2X_NUM_TESTS;
9328}
9329
f3c87cdd
YG
9330static int bnx2x_test_registers(struct bnx2x *bp)
9331{
9332 int idx, i, rc = -ENODEV;
9333 u32 wr_val = 0;
9dabc424 9334 int port = BP_PORT(bp);
f3c87cdd
YG
9335 static const struct {
9336 u32 offset0;
9337 u32 offset1;
9338 u32 mask;
9339 } reg_tbl[] = {
9340/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9341 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9342 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9343 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9344 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9345 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9346 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9347 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9348 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9349 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9350/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9351 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9352 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9353 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9354 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9355 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9356 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9357 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9358 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9359 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9360/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9361 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9362 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9363 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9364 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9365 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9366 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9367 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9368 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9369 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9370/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9371 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9372 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9373 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9374 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9375 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9376 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9377 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9378
9379 { 0xffffffff, 0, 0x00000000 }
9380 };
9381
9382 if (!netif_running(bp->dev))
9383 return rc;
9384
 9385 /* Run the test twice:
 9386 first by writing 0x00000000, then by writing 0xffffffff */
9387 for (idx = 0; idx < 2; idx++) {
9388
9389 switch (idx) {
9390 case 0:
9391 wr_val = 0;
9392 break;
9393 case 1:
9394 wr_val = 0xffffffff;
9395 break;
9396 }
9397
9398 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9399 u32 offset, mask, save_val, val;
f3c87cdd
YG
9400
9401 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9402 mask = reg_tbl[i].mask;
9403
9404 save_val = REG_RD(bp, offset);
9405
9406 REG_WR(bp, offset, wr_val);
9407 val = REG_RD(bp, offset);
9408
9409 /* Restore the original register's value */
9410 REG_WR(bp, offset, save_val);
9411
 9412 /* verify the value read back matches the expected value */
9413 if ((val & mask) != (wr_val & mask))
9414 goto test_reg_exit;
9415 }
9416 }
9417
9418 rc = 0;
9419
9420test_reg_exit:
9421 return rc;
9422}
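/* Note (editorial, not driver code): the register test uses the classic
 * write/read-back/restore pattern - each register is written with an
 * all-zeros and then an all-ones pattern, read back through its
 * read/write mask, and restored to its saved value before the compare,
 * so the test leaves the device state untouched.
 */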
9423
9424static int bnx2x_test_memory(struct bnx2x *bp)
9425{
9426 int i, j, rc = -ENODEV;
9427 u32 val;
9428 static const struct {
9429 u32 offset;
9430 int size;
9431 } mem_tbl[] = {
9432 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9433 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9434 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9435 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9436 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9437 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9438 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9439
9440 { 0xffffffff, 0 }
9441 };
9442 static const struct {
9443 char *name;
9444 u32 offset;
9dabc424
YG
9445 u32 e1_mask;
9446 u32 e1h_mask;
f3c87cdd 9447 } prty_tbl[] = {
9dabc424
YG
9448 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9449 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9450 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9451 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9452 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9453 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9454
9455 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
9456 };
9457
9458 if (!netif_running(bp->dev))
9459 return rc;
9460
9461 /* Go through all the memories */
9462 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9463 for (j = 0; j < mem_tbl[i].size; j++)
9464 REG_RD(bp, mem_tbl[i].offset + j*4);
9465
9466 /* Check the parity status */
9467 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9468 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
9469 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9470 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
9471 DP(NETIF_MSG_HW,
9472 "%s is 0x%x\n", prty_tbl[i].name, val);
9473 goto test_mem_exit;
9474 }
9475 }
9476
9477 rc = 0;
9478
9479test_mem_exit:
9480 return rc;
9481}
9482
f3c87cdd
YG
9483static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9484{
9485 int cnt = 1000;
9486
9487 if (link_up)
9488 while (bnx2x_link_test(bp) && cnt--)
9489 msleep(10);
9490}
9491
9492static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9493{
9494 unsigned int pkt_size, num_pkts, i;
9495 struct sk_buff *skb;
9496 unsigned char *packet;
9497 struct bnx2x_fastpath *fp = &bp->fp[0];
9498 u16 tx_start_idx, tx_idx;
9499 u16 rx_start_idx, rx_idx;
9500 u16 pkt_prod;
9501 struct sw_tx_bd *tx_buf;
9502 struct eth_tx_bd *tx_bd;
9503 dma_addr_t mapping;
9504 union eth_rx_cqe *cqe;
9505 u8 cqe_fp_flags;
9506 struct sw_rx_bd *rx_buf;
9507 u16 len;
9508 int rc = -ENODEV;
9509
b5bf9068
EG
9510 /* check the loopback mode */
9511 switch (loopback_mode) {
9512 case BNX2X_PHY_LOOPBACK:
9513 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9514 return -EINVAL;
9515 break;
9516 case BNX2X_MAC_LOOPBACK:
f3c87cdd 9517 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9518 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
9519 break;
9520 default:
f3c87cdd 9521 return -EINVAL;
b5bf9068 9522 }
f3c87cdd 9523
b5bf9068
EG
9524 /* prepare the loopback packet */
9525 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9526 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
9527 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9528 if (!skb) {
9529 rc = -ENOMEM;
9530 goto test_loopback_exit;
9531 }
9532 packet = skb_put(skb, pkt_size);
9533 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9534 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9535 for (i = ETH_HLEN; i < pkt_size; i++)
9536 packet[i] = (unsigned char) (i & 0xff);
9537
b5bf9068 9538 /* send the loopback packet */
f3c87cdd
YG
9539 num_pkts = 0;
9540 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9541 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9542
9543 pkt_prod = fp->tx_pkt_prod++;
9544 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9545 tx_buf->first_bd = fp->tx_bd_prod;
9546 tx_buf->skb = skb;
9547
9548 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9549 mapping = pci_map_single(bp->pdev, skb->data,
9550 skb_headlen(skb), PCI_DMA_TODEVICE);
9551 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9552 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9553 tx_bd->nbd = cpu_to_le16(1);
9554 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9555 tx_bd->vlan = cpu_to_le16(pkt_prod);
9556 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9557 ETH_TX_BD_FLAGS_END_BD);
9558 tx_bd->general_data = ((UNICAST_ADDRESS <<
9559 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9560
58f4c4cf
EG
9561 wmb();
9562
4781bfad 9563 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
f3c87cdd 9564 mb(); /* FW restriction: must not reorder writing nbd and packets */
4781bfad 9565 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
0626b899 9566 DOORBELL(bp, fp->index, 0);
f3c87cdd
YG
9567
9568 mmiowb();
9569
9570 num_pkts++;
9571 fp->tx_bd_prod++;
9572 bp->dev->trans_start = jiffies;
9573
9574 udelay(100);
9575
9576 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9577 if (tx_idx != tx_start_idx + num_pkts)
9578 goto test_loopback_exit;
9579
9580 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9581 if (rx_idx != rx_start_idx + num_pkts)
9582 goto test_loopback_exit;
9583
9584 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9585 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9586 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9587 goto test_loopback_rx_exit;
9588
9589 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9590 if (len != pkt_size)
9591 goto test_loopback_rx_exit;
9592
9593 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9594 skb = rx_buf->skb;
9595 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9596 for (i = ETH_HLEN; i < pkt_size; i++)
9597 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9598 goto test_loopback_rx_exit;
9599
9600 rc = 0;
9601
9602test_loopback_rx_exit:
f3c87cdd
YG
9603
9604 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9605 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9606 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9607 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9608
9609 /* Update producers */
9610 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9611 fp->rx_sge_prod);
f3c87cdd
YG
9612
9613test_loopback_exit:
9614 bp->link_params.loopback_mode = LOOPBACK_NONE;
9615
9616 return rc;
9617}
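/* Note (editorial, not driver code): observe the barrier choreography in
 * the transmit path above - wmb() orders the BD writes before the
 * producer updates, mb() keeps the FW-mandated nbd/packets ordering, and
 * mmiowb() after the doorbell keeps the MMIO write ordered with respect
 * to other CPUs before any protecting lock is released (the subject of
 * this commit, "Adding some mmiowb").
 */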
9618
9619static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9620{
b5bf9068 9621 int rc = 0, res;
f3c87cdd
YG
9622
9623 if (!netif_running(bp->dev))
9624 return BNX2X_LOOPBACK_FAILED;
9625
f8ef6e44 9626 bnx2x_netif_stop(bp, 1);
3910c8ae 9627 bnx2x_acquire_phy_lock(bp);
f3c87cdd 9628
b5bf9068
EG
9629 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9630 if (res) {
9631 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9632 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
9633 }
9634
b5bf9068
EG
9635 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9636 if (res) {
9637 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9638 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
9639 }
9640
3910c8ae 9641 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
9642 bnx2x_netif_start(bp);
9643
9644 return rc;
9645}
9646
9647#define CRC32_RESIDUAL 0xdebb20e3
9648
9649static int bnx2x_test_nvram(struct bnx2x *bp)
9650{
9651 static const struct {
9652 int offset;
9653 int size;
9654 } nvram_tbl[] = {
9655 { 0, 0x14 }, /* bootstrap */
9656 { 0x14, 0xec }, /* dir */
9657 { 0x100, 0x350 }, /* manuf_info */
9658 { 0x450, 0xf0 }, /* feature_info */
9659 { 0x640, 0x64 }, /* upgrade_key_info */
9660 { 0x6a4, 0x64 },
9661 { 0x708, 0x70 }, /* manuf_key_info */
9662 { 0x778, 0x70 },
9663 { 0, 0 }
9664 };
4781bfad 9665 __be32 buf[0x350 / 4];
f3c87cdd
YG
9666 u8 *data = (u8 *)buf;
9667 int i, rc;
9668 u32 magic, csum;
9669
9670 rc = bnx2x_nvram_read(bp, 0, data, 4);
9671 if (rc) {
f5372251 9672 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
9673 goto test_nvram_exit;
9674 }
9675
9676 magic = be32_to_cpu(buf[0]);
9677 if (magic != 0x669955aa) {
9678 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9679 rc = -ENODEV;
9680 goto test_nvram_exit;
9681 }
9682
9683 for (i = 0; nvram_tbl[i].size; i++) {
9684
9685 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9686 nvram_tbl[i].size);
9687 if (rc) {
9688 DP(NETIF_MSG_PROBE,
f5372251 9689 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
9690 goto test_nvram_exit;
9691 }
9692
9693 csum = ether_crc_le(nvram_tbl[i].size, data);
9694 if (csum != CRC32_RESIDUAL) {
9695 DP(NETIF_MSG_PROBE,
9696 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9697 rc = -ENODEV;
9698 goto test_nvram_exit;
9699 }
9700 }
9701
9702test_nvram_exit:
9703 return rc;
9704}
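/* Note (editorial, not driver code): CRC32_RESIDUAL relies on a standard
 * CRC-32 property - running the CRC over a block that already contains
 * its own checksum at the end yields the fixed residue 0xdebb20e3. Each
 * nvram_tbl region is therefore validated simply by checking
 * ether_crc_le() over the whole region, trailing checksum included,
 * against that constant.
 */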
9705
9706static int bnx2x_test_intr(struct bnx2x *bp)
9707{
9708 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9709 int i, rc;
9710
9711 if (!netif_running(bp->dev))
9712 return -ENODEV;
9713
8d9c5f34 9714 config->hdr.length = 0;
af246401
EG
9715 if (CHIP_IS_E1(bp))
9716 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9717 else
9718 config->hdr.offset = BP_FUNC(bp);
0626b899 9719 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
9720 config->hdr.reserved1 = 0;
9721
9722 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9723 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9724 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9725 if (rc == 0) {
9726 bp->set_mac_pending++;
9727 for (i = 0; i < 10; i++) {
9728 if (!bp->set_mac_pending)
9729 break;
9730 msleep_interruptible(10);
9731 }
9732 if (i == 10)
9733 rc = -ENODEV;
9734 }
9735
9736 return rc;
9737}
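/* Note (editorial, not driver code): the interrupt test posts a harmless
 * SET_MAC ramrod and then polls set_mac_pending for up to ~100 ms
 * (10 x 10 ms); the counter is cleared from the slowpath completion
 * handler, so seeing it drop proves the interrupt path is alive.
 */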
9738
a2fbb9ea
ET
9739static void bnx2x_self_test(struct net_device *dev,
9740 struct ethtool_test *etest, u64 *buf)
9741{
9742 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
9743
9744 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9745
f3c87cdd 9746 if (!netif_running(dev))
a2fbb9ea 9747 return;
a2fbb9ea 9748
33471629 9749 /* offline tests are not supported in MF mode */
f3c87cdd
YG
9750 if (IS_E1HMF(bp))
9751 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9752
9753 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
9754 int port = BP_PORT(bp);
9755 u32 val;
f3c87cdd
YG
9756 u8 link_up;
9757
279abdf5
EG
9758 /* save current value of input enable for TX port IF */
9759 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
9760 /* disable input for TX port IF */
9761 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
9762
f3c87cdd
YG
9763 link_up = bp->link_vars.link_up;
9764 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9765 bnx2x_nic_load(bp, LOAD_DIAG);
9766 /* wait until link state is restored */
9767 bnx2x_wait_for_link(bp, link_up);
9768
9769 if (bnx2x_test_registers(bp) != 0) {
9770 buf[0] = 1;
9771 etest->flags |= ETH_TEST_FL_FAILED;
9772 }
9773 if (bnx2x_test_memory(bp) != 0) {
9774 buf[1] = 1;
9775 etest->flags |= ETH_TEST_FL_FAILED;
9776 }
9777 buf[2] = bnx2x_test_loopback(bp, link_up);
9778 if (buf[2] != 0)
9779 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 9780
f3c87cdd 9781 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
9782
9783 /* restore input for TX port IF */
9784 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
9785
f3c87cdd
YG
9786 bnx2x_nic_load(bp, LOAD_NORMAL);
9787 /* wait until link state is restored */
9788 bnx2x_wait_for_link(bp, link_up);
9789 }
9790 if (bnx2x_test_nvram(bp) != 0) {
9791 buf[3] = 1;
a2fbb9ea
ET
9792 etest->flags |= ETH_TEST_FL_FAILED;
9793 }
f3c87cdd
YG
9794 if (bnx2x_test_intr(bp) != 0) {
9795 buf[4] = 1;
9796 etest->flags |= ETH_TEST_FL_FAILED;
9797 }
9798 if (bp->port.pmf)
9799 if (bnx2x_link_test(bp) != 0) {
9800 buf[5] = 1;
9801 etest->flags |= ETH_TEST_FL_FAILED;
9802 }
f3c87cdd
YG
9803
9804#ifdef BNX2X_EXTRA_DEBUG
9805 bnx2x_panic_dump(bp);
9806#endif
a2fbb9ea
ET
9807}
9808
de832a55
EG
9809static const struct {
9810 long offset;
9811 int size;
9812 u8 string[ETH_GSTRING_LEN];
9813} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9814/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9815 { Q_STATS_OFFSET32(error_bytes_received_hi),
9816 8, "[%d]: rx_error_bytes" },
9817 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9818 8, "[%d]: rx_ucast_packets" },
9819 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9820 8, "[%d]: rx_mcast_packets" },
9821 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9822 8, "[%d]: rx_bcast_packets" },
9823 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9824 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9825 4, "[%d]: rx_phy_ip_err_discards"},
9826 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9827 4, "[%d]: rx_skb_alloc_discard" },
9828 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9829
9830/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9831 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9832 8, "[%d]: tx_packets" }
9833};
9834
bb2a0f7a
YG
9835static const struct {
9836 long offset;
9837 int size;
9838 u32 flags;
66e855f3
YG
9839#define STATS_FLAGS_PORT 1
9840#define STATS_FLAGS_FUNC 2
de832a55 9841#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 9842 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 9843} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
9844/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9845 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 9846 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 9847 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 9848 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 9849 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 9850 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 9851 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 9852 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 9853 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 9854 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9855 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9856 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9857 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
9858 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9859 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9860 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9861 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9862/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9863 8, STATS_FLAGS_PORT, "rx_fragments" },
9864 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9865 8, STATS_FLAGS_PORT, "rx_jabbers" },
9866 { STATS_OFFSET32(no_buff_discard_hi),
9867 8, STATS_FLAGS_BOTH, "rx_discards" },
9868 { STATS_OFFSET32(mac_filter_discard),
9869 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9870 { STATS_OFFSET32(xxoverflow_discard),
9871 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9872 { STATS_OFFSET32(brb_drop_hi),
9873 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9874 { STATS_OFFSET32(brb_truncate_hi),
9875 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9876 { STATS_OFFSET32(pause_frames_received_hi),
9877 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9878 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9879 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9880 { STATS_OFFSET32(nig_timer_max),
9881 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9882/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9883 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9884 { STATS_OFFSET32(rx_skb_alloc_failed),
9885 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9886 { STATS_OFFSET32(hw_csum_err),
9887 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9888
9889 { STATS_OFFSET32(total_bytes_transmitted_hi),
9890 8, STATS_FLAGS_BOTH, "tx_bytes" },
9891 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9892 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9893 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9894 8, STATS_FLAGS_BOTH, "tx_packets" },
9895 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9896 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9897 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9898 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9899 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9900 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9901 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9902 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 9903/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9904 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9905 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9906 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9907 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9908 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9909 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9910 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9911 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9912 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9913 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9914 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9915 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9916 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9917 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9918 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9919 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9920 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9921 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9922 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 9923/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9924 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
9925 { STATS_OFFSET32(pause_frames_sent_hi),
9926 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
9927};
9928
de832a55
EG
9929#define IS_PORT_STAT(i) \
9930 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9931#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9932#define IS_E1HMF_MODE_STAT(bp) \
9933 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 9934
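/* Note (editorial, not driver code): the macros above partition the
 * counters into port-wide, per-function, or both; IS_E1HMF_MODE_STAT()
 * hides port-wide counters in E1H multi-function mode - presumably
 * because they would mix in other functions' traffic - unless the
 * BNX2X_MSG_STATS debug bit is set in msglevel.
 */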
a2fbb9ea
ET
9935static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9936{
bb2a0f7a 9937 struct bnx2x *bp = netdev_priv(dev);
de832a55 9938 int i, j, k;
bb2a0f7a 9939
a2fbb9ea
ET
9940 switch (stringset) {
9941 case ETH_SS_STATS:
de832a55
EG
9942 if (is_multi(bp)) {
9943 k = 0;
9944 for_each_queue(bp, i) {
9945 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9946 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9947 bnx2x_q_stats_arr[j].string, i);
9948 k += BNX2X_NUM_Q_STATS;
9949 }
9950 if (IS_E1HMF_MODE_STAT(bp))
9951 break;
9952 for (j = 0; j < BNX2X_NUM_STATS; j++)
9953 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9954 bnx2x_stats_arr[j].string);
9955 } else {
9956 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9957 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9958 continue;
9959 strcpy(buf + j*ETH_GSTRING_LEN,
9960 bnx2x_stats_arr[i].string);
9961 j++;
9962 }
bb2a0f7a 9963 }
a2fbb9ea
ET
9964 break;
9965
9966 case ETH_SS_TEST:
9967 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9968 break;
9969 }
9970}
9971
9972static int bnx2x_get_stats_count(struct net_device *dev)
9973{
bb2a0f7a 9974 struct bnx2x *bp = netdev_priv(dev);
de832a55 9975 int i, num_stats;
bb2a0f7a 9976
de832a55
EG
9977 if (is_multi(bp)) {
9978 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9979 if (!IS_E1HMF_MODE_STAT(bp))
9980 num_stats += BNX2X_NUM_STATS;
9981 } else {
9982 if (IS_E1HMF_MODE_STAT(bp)) {
9983 num_stats = 0;
9984 for (i = 0; i < BNX2X_NUM_STATS; i++)
9985 if (IS_FUNC_STAT(i))
9986 num_stats++;
9987 } else
9988 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 9989 }
de832a55 9990
bb2a0f7a 9991 return num_stats;
a2fbb9ea
ET
9992}
9993
9994static void bnx2x_get_ethtool_stats(struct net_device *dev,
9995 struct ethtool_stats *stats, u64 *buf)
9996{
9997 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
9998 u32 *hw_stats, *offset;
9999 int i, j, k;
bb2a0f7a 10000
de832a55
EG
10001 if (is_multi(bp)) {
10002 k = 0;
10003 for_each_queue(bp, i) {
10004 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10005 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10006 if (bnx2x_q_stats_arr[j].size == 0) {
10007 /* skip this counter */
10008 buf[k + j] = 0;
10009 continue;
10010 }
10011 offset = (hw_stats +
10012 bnx2x_q_stats_arr[j].offset);
10013 if (bnx2x_q_stats_arr[j].size == 4) {
10014 /* 4-byte counter */
10015 buf[k + j] = (u64) *offset;
10016 continue;
10017 }
10018 /* 8-byte counter */
10019 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10020 }
10021 k += BNX2X_NUM_Q_STATS;
10022 }
10023 if (IS_E1HMF_MODE_STAT(bp))
10024 return;
10025 hw_stats = (u32 *)&bp->eth_stats;
10026 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10027 if (bnx2x_stats_arr[j].size == 0) {
10028 /* skip this counter */
10029 buf[k + j] = 0;
10030 continue;
10031 }
10032 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10033 if (bnx2x_stats_arr[j].size == 4) {
10034 /* 4-byte counter */
10035 buf[k + j] = (u64) *offset;
10036 continue;
10037 }
10038 /* 8-byte counter */
10039 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10040 }
de832a55
EG
10041 } else {
10042 hw_stats = (u32 *)&bp->eth_stats;
10043 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10044 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10045 continue;
10046 if (bnx2x_stats_arr[i].size == 0) {
10047 /* skip this counter */
10048 buf[j] = 0;
10049 j++;
10050 continue;
10051 }
10052 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10053 if (bnx2x_stats_arr[i].size == 4) {
10054 /* 4-byte counter */
10055 buf[j] = (u64) *offset;
10056 j++;
10057 continue;
10058 }
10059 /* 8-byte counter */
10060 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10061 j++;
a2fbb9ea 10062 }
a2fbb9ea
ET
10063 }
10064}
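/* Note (editorial, not driver code): the 8-byte counters above live as
 * two adjacent u32s with the high word first, folded by HILO_U64. The
 * open-coded equivalent of HILO_U64(*offset, *(offset + 1)) is:
 *
 *	u64 val = ((u64) hi << 32) | lo;
 */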
10065
10066static int bnx2x_phys_id(struct net_device *dev, u32 data)
10067{
10068 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10069 int port = BP_PORT(bp);
a2fbb9ea
ET
10070 int i;
10071
34f80b04
EG
10072 if (!netif_running(dev))
10073 return 0;
10074
10075 if (!bp->port.pmf)
10076 return 0;
10077
a2fbb9ea
ET
10078 if (data == 0)
10079 data = 2;
10080
10081 for (i = 0; i < (data * 2); i++) {
c18487ee 10082 if ((i % 2) == 0)
34f80b04 10083 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
10084 bp->link_params.hw_led_mode,
10085 bp->link_params.chip_id);
10086 else
34f80b04 10087 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
10088 bp->link_params.hw_led_mode,
10089 bp->link_params.chip_id);
10090
a2fbb9ea
ET
10091 msleep_interruptible(500);
10092 if (signal_pending(current))
10093 break;
10094 }
10095
c18487ee 10096 if (bp->link_vars.link_up)
34f80b04 10097 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
10098 bp->link_vars.line_speed,
10099 bp->link_params.hw_led_mode,
10100 bp->link_params.chip_id);
a2fbb9ea
ET
10101
10102 return 0;
10103}
10104
10105static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
10106 .get_settings = bnx2x_get_settings,
10107 .set_settings = bnx2x_set_settings,
10108 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
10109 .get_regs_len = bnx2x_get_regs_len,
10110 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
10111 .get_wol = bnx2x_get_wol,
10112 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
10113 .get_msglevel = bnx2x_get_msglevel,
10114 .set_msglevel = bnx2x_set_msglevel,
10115 .nway_reset = bnx2x_nway_reset,
01e53298 10116 .get_link = bnx2x_get_link,
7a9b2557
VZ
10117 .get_eeprom_len = bnx2x_get_eeprom_len,
10118 .get_eeprom = bnx2x_get_eeprom,
10119 .set_eeprom = bnx2x_set_eeprom,
10120 .get_coalesce = bnx2x_get_coalesce,
10121 .set_coalesce = bnx2x_set_coalesce,
10122 .get_ringparam = bnx2x_get_ringparam,
10123 .set_ringparam = bnx2x_set_ringparam,
10124 .get_pauseparam = bnx2x_get_pauseparam,
10125 .set_pauseparam = bnx2x_set_pauseparam,
10126 .get_rx_csum = bnx2x_get_rx_csum,
10127 .set_rx_csum = bnx2x_set_rx_csum,
10128 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10129 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
10130 .set_flags = bnx2x_set_flags,
10131 .get_flags = ethtool_op_get_flags,
10132 .get_sg = ethtool_op_get_sg,
10133 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
10134 .get_tso = ethtool_op_get_tso,
10135 .set_tso = bnx2x_set_tso,
10136 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
10137 .self_test = bnx2x_self_test,
10138 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
10139 .phys_id = bnx2x_phys_id,
10140 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 10141 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
10142};
10143
10144/* end of ethtool_ops */
10145
10146/****************************************************************************
10147* General service functions
10148****************************************************************************/
10149
10150static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10151{
10152 u16 pmcsr;
10153
10154 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10155
10156 switch (state) {
10157 case PCI_D0:
34f80b04 10158 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10159 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10160 PCI_PM_CTRL_PME_STATUS));
10161
10162 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10163 /* delay required during transition out of D3hot */
a2fbb9ea 10164 msleep(20);
34f80b04 10165 break;
a2fbb9ea 10166
10167 case PCI_D3hot:
10168 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10169 pmcsr |= 3;
a2fbb9ea 10170
10171 if (bp->wol)
10172 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10173
10174 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10175 pmcsr);
a2fbb9ea 10176
10177 /* No more memory access after this point until
10178 * device is brought back to D0.
10179 */
10180 break;
10181
10182 default:
10183 return -EINVAL;
10184 }
10185 return 0;
10186}
10187
10188static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10189{
10190 u16 rx_cons_sb;
10191
10192 /* Tell compiler that status block fields can change */
10193 barrier();
10194 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
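 /* The last entry of each RCQ page is a next-page pointer rather
  * than a real completion, so step over it when the consumer index
  * lands on it.
  */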
10195 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10196 rx_cons_sb++;
10197 return (fp->rx_comp_cons != rx_cons_sb);
10198}
10199
10200/*
10201 * net_device service functions
10202 */
10203
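/* NAPI poll callback: service Tx completions first, then up to
 * 'budget' Rx completions. We only leave polling mode (napi_complete
 * plus interrupt re-enable through bnx2x_ack_sb) once BNX2X_HAS_WORK
 * confirms both rings are idle; returning work_done == budget keeps
 * the queue polled.
 */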
10204static int bnx2x_poll(struct napi_struct *napi, int budget)
10205{
10206 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10207 napi);
10208 struct bnx2x *bp = fp->bp;
10209 int work_done = 0;
10210
10211#ifdef BNX2X_STOP_ON_ERROR
10212 if (unlikely(bp->panic))
34f80b04 10213 goto poll_panic;
10214#endif
10215
10216 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10217 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10218 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10219
10220 bnx2x_update_fpsb_idx(fp);
10221
237907c1 10222 if (bnx2x_has_tx_work(fp))
7961f791 10223 bnx2x_tx_int(fp);
a2fbb9ea 10224
8534f32c 10225 if (bnx2x_has_rx_work(fp)) {
a2fbb9ea 10226 work_done = bnx2x_rx_int(fp, budget);
356e2385 10227
10228 /* must not complete if we consumed full budget */
10229 if (work_done >= budget)
10230 goto poll_again;
10231 }
a2fbb9ea 10232
10233 /* BNX2X_HAS_WORK() reads the status block, so we must make sure
10234 * that the status block indices have actually been read
10235 * (bnx2x_update_fpsb_idx) before this check (BNX2X_HAS_WORK);
10236 * otherwise, if a DMA arrived right after BNX2X_HAS_WORK and there
10237 * were no rmb, the read in bnx2x_update_fpsb_idx could be postponed
10238 * to just before bnx2x_ack_sb and we would write the "newer" status
10239 * block value to the IGU. In that case no further interrupt would
10240 * arrive until the status block is updated again, even though work
10241 * is still unhandled.
10242 */
10243 rmb();
a2fbb9ea 10244
8534f32c 10245 if (!BNX2X_HAS_WORK(fp)) {
a2fbb9ea 10246#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10247poll_panic:
a2fbb9ea 10248#endif
288379f0 10249 napi_complete(napi);
a2fbb9ea 10250
0626b899 10251 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 10252 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 10253 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10254 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10255 }
356e2385 10256
8534f32c 10257poll_again:
10258 return work_done;
10259}
10260
10261
10262/* We split the first BD into a headers BD and a data BD
33471629 10263 * to ease the pain of our fellow microcode engineers;
10264 * we use one mapping for both BDs.
10265 * So far this has only been observed to happen
10266 * in Other Operating Systems(TM).
10267 */
10268static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10269 struct bnx2x_fastpath *fp,
10270 struct eth_tx_bd **tx_bd, u16 hlen,
10271 u16 bd_prod, int nbd)
10272{
10273 struct eth_tx_bd *h_tx_bd = *tx_bd;
10274 struct eth_tx_bd *d_tx_bd;
10275 dma_addr_t mapping;
10276 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10277
10278 /* first fix first BD */
10279 h_tx_bd->nbd = cpu_to_le16(nbd);
10280 h_tx_bd->nbytes = cpu_to_le16(hlen);
10281
10282 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10283 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10284 h_tx_bd->addr_lo, h_tx_bd->nbd);
10285
10286 /* now get a new data BD
10287 * (after the pbd) and fill it */
10288 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10289 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10290
10291 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10292 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10293
10294 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10295 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10296 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10297 d_tx_bd->vlan = 0;
10298 /* this marks the BD as one that has no individual mapping;
10299 * the FW ignores this flag in a BD not marked start
10300 */
10301 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10302 DP(NETIF_MSG_TX_QUEUED,
10303 "TSO split data size is %d (%x:%x)\n",
10304 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10305
10306 /* update tx_bd for marking the last BD flag */
10307 *tx_bd = d_tx_bd;
10308
10309 return bd_prod;
10310}
10311
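/* Adjust a partially computed checksum when the HW's checksum start
 * offset differs from the one the stack used: fold out (fix > 0) or
 * fold in (fix < 0) the bytes between the two start points, then
 * swab into the byte order the FW expects in the parse BD.
 */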
10312static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10313{
10314 if (fix > 0)
10315 csum = (u16) ~csum_fold(csum_sub(csum,
10316 csum_partial(t_header - fix, fix, 0)));
10317
10318 else if (fix < 0)
10319 csum = (u16) ~csum_fold(csum_add(csum,
10320 csum_partial(t_header, -fix, 0)));
10321
10322 return swab16(csum);
10323}
10324
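/* Classify an skb for transmission: returns a mask of XMIT_* flags
 * telling the xmit path which checksum offload (IPv4/IPv6, TCP) and
 * which GSO flavour, if any, the BDs must be programmed for.
 */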
10325static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10326{
10327 u32 rc;
10328
10329 if (skb->ip_summed != CHECKSUM_PARTIAL)
10330 rc = XMIT_PLAIN;
10331
10332 else {
4781bfad 10333 if (skb->protocol == htons(ETH_P_IPV6)) {
10334 rc = XMIT_CSUM_V6;
10335 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10336 rc |= XMIT_CSUM_TCP;
10337
10338 } else {
10339 rc = XMIT_CSUM_V4;
10340 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10341 rc |= XMIT_CSUM_TCP;
10342 }
10343 }
10344
10345 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10346 rc |= XMIT_GSO_V4;
10347
10348 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10349 rc |= XMIT_GSO_V6;
10350
10351 return rc;
10352}
10353
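/* The FW restriction enforced below: within any window of
 * (MAX_FETCH_BD - 3) consecutive BDs the chip must see at least one
 * full MSS of data. bnx2x_pkt_req_lin() slides such a window over the
 * frag list and requests linearization whenever a window sums to less
 * than the MSS.
 */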
632da4d6 10354#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10355/* check if a packet requires linearization (i.e. it is too fragmented);
10356 no need to check fragmentation if page size > 8K (there will be no
10357 violation of FW restrictions) */
10358static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10359 u32 xmit_type)
10360{
10361 int to_copy = 0;
10362 int hlen = 0;
10363 int first_bd_sz = 0;
10364
10365 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10366 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10367
10368 if (xmit_type & XMIT_GSO) {
10369 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10370 /* Check if LSO packet needs to be copied:
10371 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10372 int wnd_size = MAX_FETCH_BD - 3;
33471629 10373 /* Number of windows to check */
10374 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10375 int wnd_idx = 0;
10376 int frag_idx = 0;
10377 u32 wnd_sum = 0;
10378
10379 /* Headers length */
10380 hlen = (int)(skb_transport_header(skb) - skb->data) +
10381 tcp_hdrlen(skb);
10382
10383 /* Amount of data (w/o headers) on linear part of SKB */
10384 first_bd_sz = skb_headlen(skb) - hlen;
10385
10386 wnd_sum = first_bd_sz;
10387
10388 /* Calculate the first sum - it's special */
10389 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10390 wnd_sum +=
10391 skb_shinfo(skb)->frags[frag_idx].size;
10392
10393 /* If there was data on linear skb data - check it */
10394 if (first_bd_sz > 0) {
10395 if (unlikely(wnd_sum < lso_mss)) {
10396 to_copy = 1;
10397 goto exit_lbl;
10398 }
10399
10400 wnd_sum -= first_bd_sz;
10401 }
10402
10403 /* Others are easier: run through the frag list and
10404 check all windows */
10405 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10406 wnd_sum +=
10407 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10408
10409 if (unlikely(wnd_sum < lso_mss)) {
10410 to_copy = 1;
10411 break;
10412 }
10413 wnd_sum -=
10414 skb_shinfo(skb)->frags[wnd_idx].size;
10415 }
10416 } else {
10417 /* a non-LSO packet that is too fragmented should always
10418 be linearized */
10419 to_copy = 1;
10420 }
10421 }
10422
10423exit_lbl:
10424 if (unlikely(to_copy))
10425 DP(NETIF_MSG_TX_QUEUED,
10426 "Linearization IS REQUIRED for %s packet. "
10427 "num_frags %d hlen %d first_bd_sz %d\n",
10428 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10429 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10430
10431 return to_copy;
10432}
632da4d6 10433#endif
10434
10435/* called with netif_tx_lock
a2fbb9ea 10436 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10437 * netif_wake_queue()
10438 */
10439static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10440{
10441 struct bnx2x *bp = netdev_priv(dev);
10442 struct bnx2x_fastpath *fp;
555f6c78 10443 struct netdev_queue *txq;
10444 struct sw_tx_bd *tx_buf;
10445 struct eth_tx_bd *tx_bd;
10446 struct eth_tx_parse_bd *pbd = NULL;
10447 u16 pkt_prod, bd_prod;
755735eb 10448 int nbd, fp_index;
a2fbb9ea 10449 dma_addr_t mapping;
10450 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10451 int vlan_off = (bp->e1hov ? 4 : 0);
10452 int i;
10453 u8 hlen = 0;
10454
10455#ifdef BNX2X_STOP_ON_ERROR
10456 if (unlikely(bp->panic))
10457 return NETDEV_TX_BUSY;
10458#endif
10459
10460 fp_index = skb_get_queue_mapping(skb);
10461 txq = netdev_get_tx_queue(dev, fp_index);
10462
a2fbb9ea 10463 fp = &bp->fp[fp_index];
755735eb 10464
231fd58a 10465 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
de832a55 10466 fp->eth_q_stats.driver_xoff++;
555f6c78 10467 netif_tx_stop_queue(txq);
10468 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10469 return NETDEV_TX_BUSY;
10470 }
10471
10472 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10473 " gso type %x xmit_type %x\n",
10474 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10475 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10476
632da4d6 10477#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10478 /* First, check if we need to linearize the skb (due to FW
10479 restrictions). No need to check fragmentation if page size > 8K
10480 (there will be no violation of FW restrictions) */
10481 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10482 /* Statistics of linearization */
10483 bp->lin_cnt++;
10484 if (skb_linearize(skb) != 0) {
10485 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10486 "silently dropping this SKB\n");
10487 dev_kfree_skb_any(skb);
da5a662a 10488 return NETDEV_TX_OK;
10489 }
10490 }
632da4d6 10491#endif
755735eb 10492
a2fbb9ea 10493 /*
755735eb 10494 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 10495 then for TSO or xsum we have a parsing info BD,
755735eb 10496 and only then we have the rest of the TSO BDs.
10497 (don't forget to mark the last one as last,
10498 and to unmap only AFTER you write to the BD ...)
755735eb 10499 And above all, all PBD sizes are in words - NOT DWORDS!
10500 */
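 /* As an illustration, the BD chain built below for a TSO packet is:
  *   [start BD (headers)] [parse BD] [data BD] ... [data BD marked end]
  * where bnx2x_tx_split() may have split the first mapping so that
  * headers and payload sit in separate BDs.
  */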
10501
10502 pkt_prod = fp->tx_pkt_prod++;
755735eb 10503 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10504
755735eb 10505 /* get a tx_buf and first BD */
10506 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10507 tx_bd = &fp->tx_desc_ring[bd_prod];
10508
10509 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10510 tx_bd->general_data = (UNICAST_ADDRESS <<
10511 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10512 /* header nbd */
10513 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10514
10515 /* remember the first BD of the packet */
10516 tx_buf->first_bd = fp->tx_bd_prod;
10517 tx_buf->skb = skb;
10518
10519 DP(NETIF_MSG_TX_QUEUED,
10520 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10521 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10522
10523#ifdef BCM_VLAN
10524 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10525 (bp->flags & HW_VLAN_TX_FLAG)) {
10526 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10527 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10528 vlan_off += 4;
10529 } else
0c6671b0 10530#endif
755735eb 10531 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10532
755735eb 10533 if (xmit_type) {
755735eb 10534 /* turn on parsing and get a BD */
10535 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10536 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10537
10538 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10539 }
10540
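 /* Note that all header lengths stored in the parse BD are in
  * 16-bit words (hence the divisions by 2 below); hlen is converted
  * back to bytes afterwards for the TSO header-split logic.
  */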
10541 if (xmit_type & XMIT_CSUM) {
10542 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10543
10544 /* for now NS flag is not used in Linux */
10545 pbd->global_data =
10546 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10547 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 10548
10549 pbd->ip_hlen = (skb_transport_header(skb) -
10550 skb_network_header(skb)) / 2;
10551
10552 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 10553
10554 pbd->total_hlen = cpu_to_le16(hlen);
10555 hlen = hlen*2 - vlan_off;
a2fbb9ea 10556
10557 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10558
10559 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 10560 tx_bd->bd_flags.as_bitfield |=
10561 ETH_TX_BD_FLAGS_IP_CSUM;
10562 else
10563 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10564
10565 if (xmit_type & XMIT_CSUM_TCP) {
10566 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10567
10568 } else {
10569 s8 fix = SKB_CS_OFF(skb); /* signed! */
10570
a2fbb9ea 10571 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 10572 pbd->cs_offset = fix / 2;
a2fbb9ea 10573
10574 DP(NETIF_MSG_TX_QUEUED,
10575 "hlen %d offset %d fix %d csum before fix %x\n",
10576 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10577 SKB_CS(skb));
10578
10579 /* HW bug: fixup the CSUM */
10580 pbd->tcp_pseudo_csum =
10581 bnx2x_csum_fix(skb_transport_header(skb),
10582 SKB_CS(skb), fix);
10583
10584 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10585 pbd->tcp_pseudo_csum);
10586 }
10587 }
10588
10589 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 10590 skb_headlen(skb), PCI_DMA_TODEVICE);
10591
10592 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10593 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 10594 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10595 tx_bd->nbd = cpu_to_le16(nbd);
10596 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10597
10598 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10599 " nbytes %d flags %x vlan %x\n",
10600 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10601 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10602 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 10603
755735eb 10604 if (xmit_type & XMIT_GSO) {
10605
10606 DP(NETIF_MSG_TX_QUEUED,
10607 "TSO packet len %d hlen %d total len %d tso size %d\n",
10608 skb->len, hlen, skb_headlen(skb),
10609 skb_shinfo(skb)->gso_size);
10610
10611 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10612
10613 if (unlikely(skb_headlen(skb) > hlen))
10614 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10615 bd_prod, ++nbd);
10616
10617 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10618 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10619 pbd->tcp_flags = pbd_tcp_flags(skb);
10620
10621 if (xmit_type & XMIT_GSO_V4) {
10622 pbd->ip_id = swab16(ip_hdr(skb)->id);
10623 pbd->tcp_pseudo_csum =
10624 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10625 ip_hdr(skb)->daddr,
10626 0, IPPROTO_TCP, 0));
10627
10628 } else
10629 pbd->tcp_pseudo_csum =
10630 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10631 &ipv6_hdr(skb)->daddr,
10632 0, IPPROTO_TCP, 0));
10633
10634 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10635 }
10636
10637 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10638 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 10639
10640 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10641 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 10642
10643 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10644 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 10645
10646 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10647 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10648 tx_bd->nbytes = cpu_to_le16(frag->size);
10649 tx_bd->vlan = cpu_to_le16(pkt_prod);
10650 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 10651
10652 DP(NETIF_MSG_TX_QUEUED,
10653 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10654 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10655 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10656 }
10657
755735eb 10658 /* now at last mark the BD as the last BD */
10659 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10660
10661 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10662 tx_bd, tx_bd->bd_flags.as_bitfield);
10663
10664 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10665
755735eb 10666 /* now send a tx doorbell, counting the next BD
10667 * if the packet contains or ends with it
10668 */
10669 if (TX_BD_POFF(bd_prod) < nbd)
10670 nbd++;
10671
10672 if (pbd)
10673 DP(NETIF_MSG_TX_QUEUED,
10674 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10675 " tcp_flags %x xsum %x seq %u hlen %u\n",
10676 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10677 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 10678 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 10679
755735eb 10680 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 10681
10682 /*
10683 * Make sure that the BD data is updated before updating the producer
10684 * since FW might read the BD right after the producer is updated.
10685 * This is only applicable for weak-ordered memory model archs such
10686 * as IA-64. The following barrier is also mandatory since the FW
10687 * assumes packets always have BDs.
10688 */
10689 wmb();
10690
4781bfad 10691 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
a2fbb9ea 10692 mb(); /* FW restriction: must not reorder writing nbd and packets */
4781bfad 10693 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
0626b899 10694 DOORBELL(bp, fp->index, 0);
10695
10696 mmiowb();
10697
755735eb 10698 fp->tx_bd_prod += nbd;
10699
10700 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10701 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10702 if we put Tx into XOFF state. */
10703 smp_mb();
555f6c78 10704 netif_tx_stop_queue(txq);
de832a55 10705 fp->eth_q_stats.driver_xoff++;
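 /* Re-check after stopping the queue: a concurrent bnx2x_tx_int()
  * may have freed BDs in the meantime, in which case we must wake
  * the queue ourselves or it would stay stopped forever.
  */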
a2fbb9ea 10706 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 10707 netif_tx_wake_queue(txq);
10708 }
10709 fp->tx_pkt++;
10710
10711 return NETDEV_TX_OK;
10712}
10713
bb2a0f7a 10714/* called with rtnl_lock */
10715static int bnx2x_open(struct net_device *dev)
10716{
10717 struct bnx2x *bp = netdev_priv(dev);
10718
10719 netif_carrier_off(dev);
10720
10721 bnx2x_set_power_state(bp, PCI_D0);
10722
bb2a0f7a 10723 return bnx2x_nic_load(bp, LOAD_OPEN);
10724}
10725
bb2a0f7a 10726/* called with rtnl_lock */
10727static int bnx2x_close(struct net_device *dev)
10728{
10729 struct bnx2x *bp = netdev_priv(dev);
10730
10731 /* Unload the driver, release IRQs */
10732 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10733 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10734 if (!CHIP_REV_IS_SLOW(bp))
10735 bnx2x_set_power_state(bp, PCI_D3hot);
10736
10737 return 0;
10738}
10739
f5372251 10740/* called with netif_tx_lock from dev_mcast.c */
10741static void bnx2x_set_rx_mode(struct net_device *dev)
10742{
10743 struct bnx2x *bp = netdev_priv(dev);
10744 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10745 int port = BP_PORT(bp);
10746
10747 if (bp->state != BNX2X_STATE_OPEN) {
10748 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10749 return;
10750 }
10751
10752 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10753
10754 if (dev->flags & IFF_PROMISC)
10755 rx_mode = BNX2X_RX_MODE_PROMISC;
10756
10757 else if ((dev->flags & IFF_ALLMULTI) ||
10758 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10759 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10760
10761 else { /* some multicasts */
10762 if (CHIP_IS_E1(bp)) {
10763 int i, old, offset;
10764 struct dev_mc_list *mclist;
10765 struct mac_configuration_cmd *config =
10766 bnx2x_sp(bp, mcast_config);
10767
10768 for (i = 0, mclist = dev->mc_list;
10769 mclist && (i < dev->mc_count);
10770 i++, mclist = mclist->next) {
10771
10772 config->config_table[i].
10773 cam_entry.msb_mac_addr =
10774 swab16(*(u16 *)&mclist->dmi_addr[0]);
10775 config->config_table[i].
10776 cam_entry.middle_mac_addr =
10777 swab16(*(u16 *)&mclist->dmi_addr[2]);
10778 config->config_table[i].
10779 cam_entry.lsb_mac_addr =
10780 swab16(*(u16 *)&mclist->dmi_addr[4]);
10781 config->config_table[i].cam_entry.flags =
10782 cpu_to_le16(port);
10783 config->config_table[i].
10784 target_table_entry.flags = 0;
10785 config->config_table[i].
10786 target_table_entry.client_id = 0;
10787 config->config_table[i].
10788 target_table_entry.vlan_id = 0;
10789
10790 DP(NETIF_MSG_IFUP,
10791 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10792 config->config_table[i].
10793 cam_entry.msb_mac_addr,
10794 config->config_table[i].
10795 cam_entry.middle_mac_addr,
10796 config->config_table[i].
10797 cam_entry.lsb_mac_addr);
10798 }
8d9c5f34 10799 old = config->hdr.length;
10800 if (old > i) {
10801 for (; i < old; i++) {
10802 if (CAM_IS_INVALID(config->
10803 config_table[i])) {
af246401 10804 /* already invalidated */
10805 break;
10806 }
10807 /* invalidate */
10808 CAM_INVALIDATE(config->
10809 config_table[i]);
10810 }
10811 }
10812
10813 if (CHIP_REV_IS_SLOW(bp))
10814 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10815 else
10816 offset = BNX2X_MAX_MULTICAST*(1 + port);
10817
8d9c5f34 10818 config->hdr.length = i;
34f80b04 10819 config->hdr.offset = offset;
8d9c5f34 10820 config->hdr.client_id = bp->fp->cl_id;
10821 config->hdr.reserved1 = 0;
10822
10823 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10824 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10825 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10826 0);
10827 } else { /* E1H */
10828 /* Accept one or more multicasts */
10829 struct dev_mc_list *mclist;
10830 u32 mc_filter[MC_HASH_SIZE];
10831 u32 crc, bit, regidx;
10832 int i;
10833
10834 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10835
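 /* E1H approximates multicast filtering with a 256-bit hash:
  * bits 31:24 of the little-endian CRC32c of each MAC select one
  * bit across the eight 32-bit MC_HASH registers. For example, a
  * (made-up) CRC of 0xdeadbeef gives bit 0xde -> regidx 6, bit 30.
  */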
10836 for (i = 0, mclist = dev->mc_list;
10837 mclist && (i < dev->mc_count);
10838 i++, mclist = mclist->next) {
10839
10840 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10841 mclist->dmi_addr);
10842
10843 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10844 bit = (crc >> 24) & 0xff;
10845 regidx = bit >> 5;
10846 bit &= 0x1f;
10847 mc_filter[regidx] |= (1 << bit);
10848 }
10849
10850 for (i = 0; i < MC_HASH_SIZE; i++)
10851 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10852 mc_filter[i]);
10853 }
10854 }
10855
10856 bp->rx_mode = rx_mode;
10857 bnx2x_set_storm_rx_mode(bp);
10858}
10859
10860/* called with rtnl_lock */
10861static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10862{
10863 struct sockaddr *addr = p;
10864 struct bnx2x *bp = netdev_priv(dev);
10865
34f80b04 10866 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10867 return -EINVAL;
10868
10869 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10870 if (netif_running(dev)) {
10871 if (CHIP_IS_E1(bp))
3101c2bc 10872 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 10873 else
3101c2bc 10874 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 10875 }
10876
10877 return 0;
10878}
10879
c18487ee 10880/* called with rtnl_lock */
10881static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10882{
10883 struct mii_ioctl_data *data = if_mii(ifr);
10884 struct bnx2x *bp = netdev_priv(dev);
3196a88a 10885 int port = BP_PORT(bp);
10886 int err;
10887
10888 switch (cmd) {
10889 case SIOCGMIIPHY:
34f80b04 10890 data->phy_id = bp->port.phy_addr;
a2fbb9ea 10891
c14423fe 10892 /* fallthrough */
c18487ee 10893
a2fbb9ea 10894 case SIOCGMIIREG: {
c18487ee 10895 u16 mii_regval;
a2fbb9ea 10896
10897 if (!netif_running(dev))
10898 return -EAGAIN;
a2fbb9ea 10899
34f80b04 10900 mutex_lock(&bp->port.phy_mutex);
3196a88a 10901 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10902 DEFAULT_PHY_DEV_ADDR,
10903 (data->reg_num & 0x1f), &mii_regval);
10904 data->val_out = mii_regval;
34f80b04 10905 mutex_unlock(&bp->port.phy_mutex);
10906 return err;
10907 }
10908
10909 case SIOCSMIIREG:
10910 if (!capable(CAP_NET_ADMIN))
10911 return -EPERM;
10912
10913 if (!netif_running(dev))
10914 return -EAGAIN;
10915
34f80b04 10916 mutex_lock(&bp->port.phy_mutex);
3196a88a 10917 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10918 DEFAULT_PHY_DEV_ADDR,
10919 (data->reg_num & 0x1f), data->val_in);
34f80b04 10920 mutex_unlock(&bp->port.phy_mutex);
10921 return err;
10922
10923 default:
10924 /* do nothing */
10925 break;
10926 }
10927
10928 return -EOPNOTSUPP;
10929}
10930
34f80b04 10931/* called with rtnl_lock */
10932static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10933{
10934 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10935 int rc = 0;
10936
10937 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10938 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10939 return -EINVAL;
10940
10941 /* This does not race with packet allocation
c14423fe 10942 * because the actual alloc size is
10943 * only updated as part of load
10944 */
10945 dev->mtu = new_mtu;
10946
10947 if (netif_running(dev)) {
10948 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10949 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10950 }
10951
10952 return rc;
10953}
10954
10955static void bnx2x_tx_timeout(struct net_device *dev)
10956{
10957 struct bnx2x *bp = netdev_priv(dev);
10958
10959#ifdef BNX2X_STOP_ON_ERROR
10960 if (!bp->panic)
10961 bnx2x_panic();
10962#endif
10963 /* This allows the netif to be shut down gracefully before resetting */
10964 schedule_work(&bp->reset_task);
10965}
10966
10967#ifdef BCM_VLAN
34f80b04 10968/* called with rtnl_lock */
10969static void bnx2x_vlan_rx_register(struct net_device *dev,
10970 struct vlan_group *vlgrp)
10971{
10972 struct bnx2x *bp = netdev_priv(dev);
10973
10974 bp->vlgrp = vlgrp;
10975
10976 /* Set flags according to the required capabilities */
10977 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10978
10979 if (dev->features & NETIF_F_HW_VLAN_TX)
10980 bp->flags |= HW_VLAN_TX_FLAG;
10981
10982 if (dev->features & NETIF_F_HW_VLAN_RX)
10983 bp->flags |= HW_VLAN_RX_FLAG;
10984
a2fbb9ea 10985 if (netif_running(dev))
49d66772 10986 bnx2x_set_client_config(bp);
a2fbb9ea 10987}
34f80b04 10988
10989#endif
10990
10991#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10992static void poll_bnx2x(struct net_device *dev)
10993{
10994 struct bnx2x *bp = netdev_priv(dev);
10995
10996 disable_irq(bp->pdev->irq);
10997 bnx2x_interrupt(bp->pdev->irq, dev);
10998 enable_irq(bp->pdev->irq);
10999}
11000#endif
11001
11002static const struct net_device_ops bnx2x_netdev_ops = {
11003 .ndo_open = bnx2x_open,
11004 .ndo_stop = bnx2x_close,
11005 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 11006 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11007 .ndo_set_mac_address = bnx2x_change_mac_addr,
11008 .ndo_validate_addr = eth_validate_addr,
11009 .ndo_do_ioctl = bnx2x_ioctl,
11010 .ndo_change_mtu = bnx2x_change_mtu,
11011 .ndo_tx_timeout = bnx2x_tx_timeout,
11012#ifdef BCM_VLAN
11013 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11014#endif
11015#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11016 .ndo_poll_controller = poll_bnx2x,
11017#endif
11018};
11019
11020static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11021 struct net_device *dev)
11022{
11023 struct bnx2x *bp;
11024 int rc;
11025
11026 SET_NETDEV_DEV(dev, &pdev->dev);
11027 bp = netdev_priv(dev);
11028
11029 bp->dev = dev;
11030 bp->pdev = pdev;
a2fbb9ea 11031 bp->flags = 0;
34f80b04 11032 bp->func = PCI_FUNC(pdev->devfn);
11033
11034 rc = pci_enable_device(pdev);
11035 if (rc) {
11036 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11037 goto err_out;
11038 }
11039
11040 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11041 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11042 " aborting\n");
11043 rc = -ENODEV;
11044 goto err_out_disable;
11045 }
11046
11047 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11048 printk(KERN_ERR PFX "Cannot find second PCI device"
11049 " base address, aborting\n");
11050 rc = -ENODEV;
11051 goto err_out_disable;
11052 }
11053
11054 if (atomic_read(&pdev->enable_cnt) == 1) {
11055 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11056 if (rc) {
11057 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11058 " aborting\n");
11059 goto err_out_disable;
11060 }
a2fbb9ea 11061
11062 pci_set_master(pdev);
11063 pci_save_state(pdev);
11064 }
11065
11066 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11067 if (bp->pm_cap == 0) {
11068 printk(KERN_ERR PFX "Cannot find power management"
11069 " capability, aborting\n");
11070 rc = -EIO;
11071 goto err_out_release;
11072 }
11073
11074 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11075 if (bp->pcie_cap == 0) {
11076 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11077 " aborting\n");
11078 rc = -EIO;
11079 goto err_out_release;
11080 }
11081
6a35528a 11082 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 11083 bp->flags |= USING_DAC_FLAG;
6a35528a 11084 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11085 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11086 " failed, aborting\n");
11087 rc = -EIO;
11088 goto err_out_release;
11089 }
11090
284901a9 11091 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11092 printk(KERN_ERR PFX "System does not support DMA,"
11093 " aborting\n");
11094 rc = -EIO;
11095 goto err_out_release;
11096 }
11097
11098 dev->mem_start = pci_resource_start(pdev, 0);
11099 dev->base_addr = dev->mem_start;
11100 dev->mem_end = pci_resource_end(pdev, 0);
11101
11102 dev->irq = pdev->irq;
11103
275f165f 11104 bp->regview = pci_ioremap_bar(pdev, 0);
11105 if (!bp->regview) {
11106 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11107 rc = -ENOMEM;
11108 goto err_out_release;
11109 }
11110
11111 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11112 min_t(u64, BNX2X_DB_SIZE,
11113 pci_resource_len(pdev, 2)));
11114 if (!bp->doorbells) {
11115 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11116 rc = -ENOMEM;
11117 goto err_out_unmap;
11118 }
11119
11120 bnx2x_set_power_state(bp, PCI_D0);
11121
11122 /* clean indirect addresses */
11123 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11124 PCICFG_VENDOR_ID_OFFSET);
11125 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11126 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11127 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11128 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 11129
34f80b04 11130 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 11131
c64213cd 11132 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 11133 dev->ethtool_ops = &bnx2x_ethtool_ops;
11134 dev->features |= NETIF_F_SG;
11135 dev->features |= NETIF_F_HW_CSUM;
11136 if (bp->flags & USING_DAC_FLAG)
11137 dev->features |= NETIF_F_HIGHDMA;
11138#ifdef BCM_VLAN
11139 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 11140 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11141#endif
11142 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 11143 dev->features |= NETIF_F_TSO6;
11144
11145 return 0;
11146
11147err_out_unmap:
11148 if (bp->regview) {
11149 iounmap(bp->regview);
11150 bp->regview = NULL;
11151 }
11152 if (bp->doorbells) {
11153 iounmap(bp->doorbells);
11154 bp->doorbells = NULL;
11155 }
11156
11157err_out_release:
11158 if (atomic_read(&pdev->enable_cnt) == 1)
11159 pci_release_regions(pdev);
11160
11161err_out_disable:
11162 pci_disable_device(pdev);
11163 pci_set_drvdata(pdev, NULL);
11164
11165err_out:
11166 return rc;
11167}
11168
11169static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11170{
11171 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11172
11173 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11174 return val;
11175}
11176
11177/* return value of 1=2.5GHz 2=5GHz */
11178static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11179{
11180 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11181
11182 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11183 return val;
11184}
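/* Sanity-check a loaded firmware image before trusting it: every
 * section's offset/length must lie inside the file, every init_ops
 * offset must index a real opcode, and the embedded version must
 * match the one this driver was built against.
 */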
11185static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11186{
11187 struct bnx2x_fw_file_hdr *fw_hdr;
11188 struct bnx2x_fw_file_section *sections;
11189 u16 *ops_offsets;
11190 u32 offset, len, num_ops;
11191 int i;
11192 const struct firmware *firmware = bp->firmware;
11193 const u8 * fw_ver;
11194
11195 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11196 return -EINVAL;
11197
11198 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11199 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11200
11201 /* Make sure none of the offsets and sizes make us read beyond
11202 * the end of the firmware data */
11203 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11204 offset = be32_to_cpu(sections[i].offset);
11205 len = be32_to_cpu(sections[i].len);
11206 if (offset + len > firmware->size) {
11207 printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11208 return -EINVAL;
11209 }
11210 }
11211
11212 /* Likewise for the init_ops offsets */
11213 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11214 ops_offsets = (u16 *)(firmware->data + offset);
11215 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11216
11217 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11218 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11219 printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11220 return -EINVAL;
11221 }
11222 }
11223
11224 /* Check FW version */
11225 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11226 fw_ver = firmware->data + offset;
11227 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11228 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11229 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11230 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11231 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11232 " Should be %d.%d.%d.%d\n",
11233 fw_ver[0], fw_ver[1], fw_ver[2],
11234 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11235 BCM_5710_FW_MINOR_VERSION,
11236 BCM_5710_FW_REVISION_VERSION,
11237 BCM_5710_FW_ENGINEERING_VERSION);
11238 return -EINVAL;
11239 }
11240
11241 return 0;
11242}
11243
11244static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11245{
11246 u32 i;
11247 const __be32 *source = (const __be32*)_source;
11248 u32 *target = (u32*)_target;
11249
11250 for (i = 0; i < n/4; i++)
11251 target[i] = be32_to_cpu(source[i]);
11252}
11253
11254/*
11255 Ops array is stored in the following format:
11256 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11257 */
11258static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11259{
11260 u32 i, j, tmp;
11261 const __be32 *source = (const __be32*)_source;
11262 struct raw_op *target = (struct raw_op*)_target;
11263
11264 for (i = 0, j = 0; i < n/8; i++, j+=2) {
11265 tmp = be32_to_cpu(source[j]);
11266 target[i].op = (tmp >> 24) & 0xff;
11267 target[i].offset = tmp & 0xffffff;
11268 target[i].raw_data = be32_to_cpu(source[j+1]);
11269 }
11270}
11271static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11272{
11273 u32 i;
11274 u16 *target = (u16*)_target;
11275 const __be16 *source = (const __be16*)_source;
11276
11277 for (i = 0; i < n/2; i++)
11278 target[i] = be16_to_cpu(source[i]);
11279}
11280
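/* Allocate bp->arr and fill it with the named firmware-file section,
 * byte-swapped by 'func'; jumps to 'lbl' on allocation failure.
 */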
11281#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11282 do { \
11283 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11284 bp->arr = kmalloc(len, GFP_KERNEL); \
11285 if (!bp->arr) { \
11286 printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11287 goto lbl; \
11288 } \
11289 func(bp->firmware->data + \
11290 be32_to_cpu(fw_hdr->arr.offset), \
11291 (u8*)bp->arr, len); \
11292 } while (0)
11293
11294
11295static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11296{
11297 char fw_file_name[40] = {0};
11298 int rc, offset;
11299 struct bnx2x_fw_file_hdr *fw_hdr;
11300
11301 /* Create a FW file name */
11302 if (CHIP_IS_E1(bp))
11303 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11304 else
11305 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11306
11307 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11308 BCM_5710_FW_MAJOR_VERSION,
11309 BCM_5710_FW_MINOR_VERSION,
11310 BCM_5710_FW_REVISION_VERSION,
11311 BCM_5710_FW_ENGINEERING_VERSION);
11312
11313 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11314
11315 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11316 if (rc) {
11317 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11318 goto request_firmware_exit;
11319 }
11320
11321 rc = bnx2x_check_firmware(bp);
11322 if (rc) {
11323 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11324 goto request_firmware_exit;
11325 }
11326
11327 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11328
11329 /* Initialize the pointers to the init arrays */
11330 /* Blob */
11331 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11332
11333 /* Opcodes */
11334 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11335
11336 /* Offsets */
11337 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11338
11339 /* STORMs firmware */
11340 bp->tsem_int_table_data = bp->firmware->data +
11341 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11342 bp->tsem_pram_data = bp->firmware->data +
11343 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11344 bp->usem_int_table_data = bp->firmware->data +
11345 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11346 bp->usem_pram_data = bp->firmware->data +
11347 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11348 bp->xsem_int_table_data = bp->firmware->data +
11349 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11350 bp->xsem_pram_data = bp->firmware->data +
11351 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11352 bp->csem_int_table_data = bp->firmware->data +
11353 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11354 bp->csem_pram_data = bp->firmware->data +
11355 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11356
11357 return 0;
11358init_offsets_alloc_err:
11359 kfree(bp->init_ops);
11360init_ops_alloc_err:
11361 kfree(bp->init_data);
11362request_firmware_exit:
11363 release_firmware(bp->firmware);
11364
11365 return rc;
11366}
11367
11368
25047950 11369
11370static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11371 const struct pci_device_id *ent)
11372{
11373 static int version_printed;
11374 struct net_device *dev = NULL;
11375 struct bnx2x *bp;
25047950 11376 int rc;
11377
11378 if (version_printed++ == 0)
11379 printk(KERN_INFO "%s", version);
11380
11381 /* dev zeroed in init_etherdev */
555f6c78 11382 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11383 if (!dev) {
11384 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 11385 return -ENOMEM;
34f80b04 11386 }
a2fbb9ea 11387
11388 bp = netdev_priv(dev);
11389 bp->msglevel = debug;
11390
34f80b04 11391 rc = bnx2x_init_dev(pdev, dev);
11392 if (rc < 0) {
11393 free_netdev(dev);
11394 return rc;
11395 }
11396
11397 pci_set_drvdata(pdev, dev);
11398
34f80b04 11399 rc = bnx2x_init_bp(bp);
11400 if (rc)
11401 goto init_one_exit;
11402
11403 /* Set init arrays */
11404 rc = bnx2x_init_firmware(bp, &pdev->dev);
11405 if (rc) {
11406 printk(KERN_ERR PFX "Error loading firmware\n");
11407 goto init_one_exit;
11408 }
11409
693fc0d1 11410 rc = register_netdev(dev);
34f80b04 11411 if (rc) {
693fc0d1 11412 dev_err(&pdev->dev, "Cannot register net device\n");
11413 goto init_one_exit;
11414 }
11415
25047950 11416 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
87942b46 11417 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
34f80b04 11418 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11419 bnx2x_get_pcie_width(bp),
11420 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11421 dev->base_addr, bp->pdev->irq);
e174961c 11422 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
c016201c 11423
a2fbb9ea 11424 return 0;
11425
11426init_one_exit:
11427 if (bp->regview)
11428 iounmap(bp->regview);
11429
11430 if (bp->doorbells)
11431 iounmap(bp->doorbells);
11432
11433 free_netdev(dev);
11434
11435 if (atomic_read(&pdev->enable_cnt) == 1)
11436 pci_release_regions(pdev);
11437
11438 pci_disable_device(pdev);
11439 pci_set_drvdata(pdev, NULL);
11440
11441 return rc;
11442}
11443
11444static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11445{
11446 struct net_device *dev = pci_get_drvdata(pdev);
11447 struct bnx2x *bp;
11448
11449 if (!dev) {
11450 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11451 return;
11452 }
228241eb 11453 bp = netdev_priv(dev);
a2fbb9ea 11454
11455 unregister_netdev(dev);
11456
11457 kfree(bp->init_ops_offsets);
11458 kfree(bp->init_ops);
11459 kfree(bp->init_data);
11460 release_firmware(bp->firmware);
11461
11462 if (bp->regview)
11463 iounmap(bp->regview);
11464
11465 if (bp->doorbells)
11466 iounmap(bp->doorbells);
11467
11468 free_netdev(dev);
11469
11470 if (atomic_read(&pdev->enable_cnt) == 1)
11471 pci_release_regions(pdev);
11472
11473 pci_disable_device(pdev);
11474 pci_set_drvdata(pdev, NULL);
11475}
11476
11477static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11478{
11479 struct net_device *dev = pci_get_drvdata(pdev);
11480 struct bnx2x *bp;
11481
11482 if (!dev) {
11483 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11484 return -ENODEV;
11485 }
11486 bp = netdev_priv(dev);
a2fbb9ea 11487
34f80b04 11488 rtnl_lock();
a2fbb9ea 11489
34f80b04 11490 pci_save_state(pdev);
228241eb 11491
11492 if (!netif_running(dev)) {
11493 rtnl_unlock();
11494 return 0;
11495 }
11496
11497 netif_device_detach(dev);
a2fbb9ea 11498
da5a662a 11499 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 11500
a2fbb9ea 11501 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 11502
11503 rtnl_unlock();
11504
11505 return 0;
11506}
11507
11508static int bnx2x_resume(struct pci_dev *pdev)
11509{
11510 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 11511 struct bnx2x *bp;
11512 int rc;
11513
11514 if (!dev) {
11515 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11516 return -ENODEV;
11517 }
228241eb 11518 bp = netdev_priv(dev);
a2fbb9ea 11519
11520 rtnl_lock();
11521
228241eb 11522 pci_restore_state(pdev);
11523
11524 if (!netif_running(dev)) {
11525 rtnl_unlock();
11526 return 0;
11527 }
11528
11529 bnx2x_set_power_state(bp, PCI_D0);
11530 netif_device_attach(dev);
11531
da5a662a 11532 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 11533
11534 rtnl_unlock();
11535
11536 return rc;
11537}
11538
11539static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11540{
11541 int i;
11542
11543 bp->state = BNX2X_STATE_ERROR;
11544
11545 bp->rx_mode = BNX2X_RX_MODE_NONE;
11546
11547 bnx2x_netif_stop(bp, 0);
11548
11549 del_timer_sync(&bp->timer);
11550 bp->stats_state = STATS_STATE_DISABLED;
11551 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11552
11553 /* Release IRQs */
11554 bnx2x_free_irq(bp);
11555
11556 if (CHIP_IS_E1(bp)) {
11557 struct mac_configuration_cmd *config =
11558 bnx2x_sp(bp, mcast_config);
11559
8d9c5f34 11560 for (i = 0; i < config->hdr.length; i++)
11561 CAM_INVALIDATE(config->config_table[i]);
11562 }
11563
11564 /* Free SKBs, SGEs, TPA pool and driver internals */
11565 bnx2x_free_skbs(bp);
555f6c78 11566 for_each_rx_queue(bp, i)
f8ef6e44 11567 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 11568 for_each_rx_queue(bp, i)
7cde1c8b 11569 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
11570 bnx2x_free_mem(bp);
11571
11572 bp->state = BNX2X_STATE_CLOSED;
11573
11574 netif_carrier_off(bp->dev);
11575
11576 return 0;
11577}
11578
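/* After an EEH reset, re-read the minimal shared-memory state (shmem
 * base, MCP validity signature, FW sequence number) needed to talk to
 * the management CPU again; flag NO_MCP if the MCP looks absent.
 */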
11579static void bnx2x_eeh_recover(struct bnx2x *bp)
11580{
11581 u32 val;
11582
11583 mutex_init(&bp->port.phy_mutex);
11584
11585 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11586 bp->link_params.shmem_base = bp->common.shmem_base;
11587 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11588
11589 if (!bp->common.shmem_base ||
11590 (bp->common.shmem_base < 0xA0000) ||
11591 (bp->common.shmem_base >= 0xC0000)) {
11592 BNX2X_DEV_INFO("MCP not active\n");
11593 bp->flags |= NO_MCP_FLAG;
11594 return;
11595 }
11596
11597 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11598 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11599 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11600 BNX2X_ERR("BAD MCP validity signature\n");
11601
11602 if (!BP_NOMCP(bp)) {
11603 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11604 & DRV_MSG_SEQ_NUMBER_MASK);
11605 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11606 }
11607}
11608
11609/**
11610 * bnx2x_io_error_detected - called when PCI error is detected
11611 * @pdev: Pointer to PCI device
11612 * @state: The current pci connection state
11613 *
11614 * This function is called after a PCI bus error affecting
11615 * this device has been detected.
11616 */
11617static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11618 pci_channel_state_t state)
11619{
11620 struct net_device *dev = pci_get_drvdata(pdev);
11621 struct bnx2x *bp = netdev_priv(dev);
11622
11623 rtnl_lock();
11624
11625 netif_device_detach(dev);
11626
11627 if (netif_running(dev))
f8ef6e44 11628 bnx2x_eeh_nic_unload(bp);
11629
11630 pci_disable_device(pdev);
11631
11632 rtnl_unlock();
11633
11634 /* Request a slot reset */
11635 return PCI_ERS_RESULT_NEED_RESET;
11636}
11637
11638/**
11639 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11640 * @pdev: Pointer to PCI device
11641 *
11642 * Restart the card from scratch, as if from a cold-boot.
11643 */
11644static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11645{
11646 struct net_device *dev = pci_get_drvdata(pdev);
11647 struct bnx2x *bp = netdev_priv(dev);
11648
11649 rtnl_lock();
11650
11651 if (pci_enable_device(pdev)) {
11652 dev_err(&pdev->dev,
11653 "Cannot re-enable PCI device after reset\n");
11654 rtnl_unlock();
11655 return PCI_ERS_RESULT_DISCONNECT;
11656 }
11657
11658 pci_set_master(pdev);
11659 pci_restore_state(pdev);
11660
11661 if (netif_running(dev))
11662 bnx2x_set_power_state(bp, PCI_D0);
11663
11664 rtnl_unlock();
11665
11666 return PCI_ERS_RESULT_RECOVERED;
11667}
11668
11669/**
11670 * bnx2x_io_resume - called when traffic can start flowing again
11671 * @pdev: Pointer to PCI device
11672 *
11673 * This callback is called when the error recovery driver tells us that
11674 * its OK to resume normal operation.
11675 */
11676static void bnx2x_io_resume(struct pci_dev *pdev)
11677{
11678 struct net_device *dev = pci_get_drvdata(pdev);
11679 struct bnx2x *bp = netdev_priv(dev);
11680
11681 rtnl_lock();
11682
11683 bnx2x_eeh_recover(bp);
11684
493adb1f 11685 if (netif_running(dev))
f8ef6e44 11686 bnx2x_nic_load(bp, LOAD_NORMAL);
11687
11688 netif_device_attach(dev);
11689
11690 rtnl_unlock();
11691}
11692
11693static struct pci_error_handlers bnx2x_err_handler = {
11694 .error_detected = bnx2x_io_error_detected,
11695 .slot_reset = bnx2x_io_slot_reset,
11696 .resume = bnx2x_io_resume,
11697};
11698
a2fbb9ea 11699static struct pci_driver bnx2x_pci_driver = {
11700 .name = DRV_MODULE_NAME,
11701 .id_table = bnx2x_pci_tbl,
11702 .probe = bnx2x_init_one,
11703 .remove = __devexit_p(bnx2x_remove_one),
11704 .suspend = bnx2x_suspend,
11705 .resume = bnx2x_resume,
11706 .err_handler = &bnx2x_err_handler,
11707};
11708
11709static int __init bnx2x_init(void)
11710{
11711 int ret;
11712
11713 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11714 if (bnx2x_wq == NULL) {
11715 printk(KERN_ERR PFX "Cannot create workqueue\n");
11716 return -ENOMEM;
11717 }
11718
11719 ret = pci_register_driver(&bnx2x_pci_driver);
11720 if (ret) {
11721 printk(KERN_ERR PFX "Cannot register driver\n");
11722 destroy_workqueue(bnx2x_wq);
11723 }
11724 return ret;
11725}
11726
11727static void __exit bnx2x_cleanup(void)
11728{
11729 pci_unregister_driver(&bnx2x_pci_driver);
11730
11731 destroy_workqueue(bnx2x_wq);
11732}
11733
11734module_init(bnx2x_init);
11735module_exit(bnx2x_cleanup);
11736
94a78b79 11737