/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.105-1"
#define DRV_MODULE_RELDATE	"2009/04/22"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
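
/* Illustrative note: the two helpers above implement indirect GRC access
 * through PCI config space - PCICFG_GRC_ADDRESS selects the internal
 * register, PCICFG_GRC_DATA carries the value, and the window is parked
 * back on PCICFG_VENDOR_ID_OFFSET afterwards (presumably so a stray
 * config cycle cannot hit a live register).  A sketch of a caller:
 *
 *	bnx2x_reg_wr_ind(bp, reg_offset, val);
 *	val = bnx2x_reg_rd_ind(bp, reg_offset);
 *
 * This path works even before DMAE is usable (see the !bp->dmae_ready
 * fallbacks below).
 */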

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
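
/* Illustrative note (derived from the code above): the DMAE engine has 16
 * command channels; dmae_reg_go_c[] maps a channel index to its GO
 * doorbell.  Posting on channel idx means copying the command image one
 * dword at a time to
 *
 *	DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx
 *
 * and then writing 1 to dmae_reg_go_c[idx] to kick the engine.
 */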

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
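
/* Worked timing example (derived from the polling loop above): cnt = 200
 * retries with udelay(5) between samples gives roughly 1 ms of polling on
 * real silicon before "DMAE timeout!" is reported; on emulation/FPGA
 * (CHIP_REV_IS_SLOW) each retry sleeps 100 ms, stretching the budget to
 * about 20 s.
 */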

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
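
/* Illustrative note (assumption, not stated in the original comments):
 * "wide-bus" (wb) registers are 64 bits wide and are accessed through
 * DMAE as a hi/lo dword pair so both halves land together.  A
 * hypothetical caller holding a u64 value would split it as:
 *
 *	bnx2x_wb_wr(bp, reg, U64_HI(val), U64_LO(val));
 */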

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
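
/* Illustrative note (derived from the loops above): each of the four
 * STORM processors (X/T/C/U) keeps an assert list in its internal
 * memory.  An entry is four dwords read at offsets +0/+4/+8/+12, and the
 * list is terminated by COMMON_ASM_INVALID_ASSERT_OPCODE in row0; the
 * return value is simply the number of populated entries found.
 */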

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
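
/* Illustrative note (interpretation of the code above): the MCP firmware
 * logs into a circular scratchpad region; "mark" (read from offset 0xf104
 * and rounded up to a dword) is the current write pointer.  The dump
 * therefore prints the older half first (mark..0xF900), then wraps to the
 * newer half (0xF108..mark), 8 dwords at a time, with data[8] = 0 acting
 * as the string terminator for each 32-byte chunk.
 */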

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
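
/* Worked example (derived from the E1H branch above): in multi-function
 * mode the leading/trailing edge mask is 0xee0f | (1 << (vn + 4)), i.e.
 * 0xee1f for vn 0 and 0xee2f for vn 1; a PMF additionally ORs in 0x1100
 * (NIG and GPIO3 attentions), giving e.g. 0xff1f for vn 0.  In
 * single-function mode all 16 lines are simply enabled (0xffff).
 */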

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
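
/* Illustrative note (derived from bnx2x_ack_sb() above): an IGU ack is a
 * single dword write that packs the status block id, storm id, new
 * consumer index and interrupt-mode op into one igu_ack_register image,
 * e.g.
 *
 *	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
 *
 * as the MSI-X fastpath handler further down uses to mask its status
 * block before scheduling NAPI.
 */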

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
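
/* Worked example (hypothetical numbers): with tx_ring_size = 1024,
 * NUM_TX_RINGS = 4, prod = 100 and cons = 60, used = (100 - 60) + 4 = 44
 * and bnx2x_tx_avail() reports 1024 - 44 = 980 free BDs.  The
 * NUM_TX_RINGS term accounts for the per-page "next-page" BDs, which can
 * never carry data.
 */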

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod,
 * and we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the last two indices in each page:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
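
/* Worked example (hypothetical numbers, derived from
 * bnx2x_update_sge_prod() above): for an aggregated packet with
 * pkt_len = 9000 and len_on_bd = 100, sge_len =
 * SGE_PAGE_ALIGN(8900) >> SGE_PAGE_SHIFT pages are consumed from the
 * SGL.  Their bits are cleared in sge_mask, and rx_sge_prod then
 * advances over every 64-bit mask element that has gone fully to zero
 * (delta += RX_SGE_MASK_ELEM_SZ per element), re-arming the mask behind
 * itself.
 */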

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take the VLAN tag into account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
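
/* Illustrative note (derived from bnx2x_update_rx_prod() above): the
 * three Rx producers (BD, CQE, SGE) are published to USTORM internal
 * memory as plain dword writes.  The wmb() guarantees that the ring
 * entries are globally visible before the firmware can observe the new
 * producer values, and mmiowb() keeps successive producer updates
 * ordered on the bus.
 */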

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
1671
1672static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1673{
1674 struct bnx2x_fastpath *fp = fp_cookie;
1675 struct bnx2x *bp = fp->bp;
0626b899 1676 int index = fp->index;
a2fbb9ea 1677
da5a662a
VZ
1678 /* Return here if interrupt is disabled */
1679 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1680 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1681 return IRQ_HANDLED;
1682 }
1683
34f80b04 1684 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
0626b899
EG
1685 index, fp->sb_id);
1686 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1687
1688#ifdef BNX2X_STOP_ON_ERROR
1689 if (unlikely(bp->panic))
1690 return IRQ_HANDLED;
1691#endif
1692
1693 prefetch(fp->rx_cons_sb);
1694 prefetch(fp->tx_cons_sb);
1695 prefetch(&fp->status_blk->c_status_block.status_block_index);
1696 prefetch(&fp->status_blk->u_status_block.status_block_index);
1697
288379f0 1698 napi_schedule(&bnx2x_fp(bp, index, napi));
34f80b04 1699
a2fbb9ea
ET
1700 return IRQ_HANDLED;
1701}
1702
1703static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1704{
555f6c78 1705 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1706 u16 status = bnx2x_ack_int(bp);
34f80b04 1707 u16 mask;
a2fbb9ea 1708
34f80b04 1709 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1710 if (unlikely(status == 0)) {
1711 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1712 return IRQ_NONE;
1713 }
f5372251 1714 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1715
34f80b04 1716 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1717 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1718 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1719 return IRQ_HANDLED;
1720 }
1721
3196a88a
EG
1722#ifdef BNX2X_STOP_ON_ERROR
1723 if (unlikely(bp->panic))
1724 return IRQ_HANDLED;
1725#endif
1726
34f80b04
EG
1727 mask = 0x2 << bp->fp[0].sb_id;
1728 if (status & mask) {
a2fbb9ea
ET
1729 struct bnx2x_fastpath *fp = &bp->fp[0];
1730
1731 prefetch(fp->rx_cons_sb);
1732 prefetch(fp->tx_cons_sb);
1733 prefetch(&fp->status_blk->c_status_block.status_block_index);
1734 prefetch(&fp->status_blk->u_status_block.status_block_index);
1735
288379f0 1736 napi_schedule(&bnx2x_fp(bp, 0, napi));
a2fbb9ea 1737
34f80b04 1738 status &= ~mask;
a2fbb9ea
ET
1739 }
1740
a2fbb9ea 1741
34f80b04 1742 if (unlikely(status & 0x1)) {
1cf167f2 1743 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea 1744
1745 status &= ~0x1;
1746 if (!status)
1747 return IRQ_HANDLED;
1748 }
1749
34f80b04 1750 if (status)
1751 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1752 status);
a2fbb9ea 1753
c18487ee 1754 return IRQ_HANDLED;
a2fbb9ea 1755}
1756
c18487ee 1757/* end of fast path */
a2fbb9ea 1758
bb2a0f7a 1759static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1760
c18487ee 1761/* Link */
1762
1763/*
1764 * General service functions
1765 */
a2fbb9ea 1766
4a37fb66 1767static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee 1768{
1769 u32 lock_status;
1770 u32 resource_bit = (1 << resource);
4a37fb66 1771 int func = BP_FUNC(bp);
1772 u32 hw_lock_control_reg;
c18487ee 1773 int cnt;
a2fbb9ea 1774
c18487ee 1775 /* Validating that the resource is within range */
1776 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1777 DP(NETIF_MSG_HW,
1778 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1779 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1780 return -EINVAL;
1781 }
a2fbb9ea 1782
4a37fb66 1783 if (func <= 5) {
1784 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1785 } else {
1786 hw_lock_control_reg =
1787 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1788 }
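/* Functions 0-5 take their lock bits from the 8-byte spaced
 DRIVER_CONTROL_1..6 registers; functions 6 and 7 continue at
 DRIVER_CONTROL_7. Reading the register returns the current lock bits;
 writing to reg + 4 (in the loop below) requests a bit. */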
1789
c18487ee 1790 /* Validating that the resource is not already taken */
4a37fb66 1791 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee 1792 if (lock_status & resource_bit) {
1793 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1794 lock_status, resource_bit);
1795 return -EEXIST;
1796 }
a2fbb9ea 1797
46230476 1798 /* Try for 5 seconds every 5ms */
1799 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1800 /* Try to acquire the lock */
4a37fb66 1801 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1802 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee 1803 if (lock_status & resource_bit)
1804 return 0;
a2fbb9ea 1805
c18487ee 1806 msleep(5);
a2fbb9ea 1807 }
c18487ee 1808 DP(NETIF_MSG_HW, "Timeout\n");
1809 return -EAGAIN;
1810}
a2fbb9ea 1811
4a37fb66 1812static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee 1813{
1814 u32 lock_status;
1815 u32 resource_bit = (1 << resource);
4a37fb66 1816 int func = BP_FUNC(bp);
1817 u32 hw_lock_control_reg;
a2fbb9ea 1818
c18487ee 1819 /* Validating that the resource is within range */
1820 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1821 DP(NETIF_MSG_HW,
1822 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1823 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1824 return -EINVAL;
1825 }
1826
4a37fb66 1827 if (func <= 5) {
1828 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1829 } else {
1830 hw_lock_control_reg =
1831 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1832 }
1833
c18487ee 1834 /* Validating that the resource is currently taken */
4a37fb66 1835 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee 1836 if (!(lock_status & resource_bit)) {
1837 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1838 lock_status, resource_bit);
1839 return -EFAULT;
a2fbb9ea 1840 }
1841
4a37fb66 1842 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee 1843 return 0;
1844}
1845
1846/* HW Lock for shared dual port PHYs */
4a37fb66 1847static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1848{
34f80b04 1849 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1850
46c6a674 1851 if (bp->port.need_hw_lock)
1852 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1853}
a2fbb9ea 1854
4a37fb66 1855static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1856{
46c6a674 1857 if (bp->port.need_hw_lock)
1858 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1859
34f80b04 1860 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1861}
a2fbb9ea 1862
4acac6a5 1863int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1864{
1865 /* The GPIO should be swapped if swap register is set and active */
1866 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1867 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1868 int gpio_shift = gpio_num +
1869 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
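/* The && above reduces the two register reads to 0 or 1, so the XOR
 with the port index flips the port only when both PORT_SWAP and
 STRAP_OVERRIDE are set. */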
1870 u32 gpio_mask = (1 << gpio_shift);
1871 u32 gpio_reg;
1872 int value;
1873
1874 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1875 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1876 return -EINVAL;
1877 }
1878
1879 /* read GPIO value */
1880 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1881
1882 /* get the requested pin value */
1883 if ((gpio_reg & gpio_mask) == gpio_mask)
1884 value = 1;
1885 else
1886 value = 0;
1887
1888 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1889
1890 return value;
1891}
1892
17de50b7 1893int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee 1894{
1895 /* The GPIO should be swapped if swap register is set and active */
1896 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1897 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee 1898 int gpio_shift = gpio_num +
1899 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1900 u32 gpio_mask = (1 << gpio_shift);
1901 u32 gpio_reg;
a2fbb9ea 1902
c18487ee 1903 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1904 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1905 return -EINVAL;
1906 }
a2fbb9ea 1907
4a37fb66 1908 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee 1909 /* read GPIO and mask except the float bits */
1910 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1911
c18487ee 1912 switch (mode) {
1913 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1914 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1915 gpio_num, gpio_shift);
1916 /* clear FLOAT and set CLR */
1917 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1918 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1919 break;
a2fbb9ea 1920
c18487ee 1921 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1922 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1923 gpio_num, gpio_shift);
1924 /* clear FLOAT and set SET */
1925 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1926 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1927 break;
a2fbb9ea 1928
17de50b7 1929 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee 1930 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1931 gpio_num, gpio_shift);
1932 /* set FLOAT */
1933 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1934 break;
a2fbb9ea 1935
c18487ee 1936 default:
1937 break;
a2fbb9ea 1938 }
1939
c18487ee 1940 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1941 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1942
c18487ee 1943 return 0;
a2fbb9ea 1944}
1945
4acac6a5 1946int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1947{
1948 /* The GPIO should be swapped if swap register is set and active */
1949 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1950 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1951 int gpio_shift = gpio_num +
1952 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1953 u32 gpio_mask = (1 << gpio_shift);
1954 u32 gpio_reg;
1955
1956 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1957 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1958 return -EINVAL;
1959 }
1960
1961 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1962 /* read GPIO int */
1963 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1964
1965 switch (mode) {
1966 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1967 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1968 "output low\n", gpio_num, gpio_shift);
1969 /* clear SET and set CLR */
1970 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1971 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1972 break;
1973
1974 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1975 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1976 "output high\n", gpio_num, gpio_shift);
1977 /* clear CLR and set SET */
1978 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1979 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1980 break;
1981
1982 default:
1983 break;
1984 }
1985
1986 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1987 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1988
1989 return 0;
1990}
1991
c18487ee 1992static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1993{
c18487ee 1994 u32 spio_mask = (1 << spio_num);
1995 u32 spio_reg;
a2fbb9ea 1996
c18487ee 1997 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1998 (spio_num > MISC_REGISTERS_SPIO_7)) {
1999 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2000 return -EINVAL;
a2fbb9ea 2001 }
2002
4a37fb66 2003 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2004 /* read SPIO and mask except the float bits */
2005 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2006
c18487ee 2007 switch (mode) {
6378c025 2008 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee 2009 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2010 /* clear FLOAT and set CLR */
2011 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2012 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2013 break;
a2fbb9ea 2014
6378c025 2015 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee 2016 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2017 /* clear FLOAT and set SET */
2018 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2019 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2020 break;
a2fbb9ea 2021
c18487ee 2022 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2023 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2024 /* set FLOAT */
2025 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2026 break;
a2fbb9ea 2027
c18487ee 2028 default:
2029 break;
a2fbb9ea 2030 }
2031
c18487ee 2032 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2033 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2034
a2fbb9ea 2035 return 0;
2036}
2037
c18487ee 2038static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2039{
ad33ea3a 2040 switch (bp->link_vars.ieee_fc &
2041 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2042 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2043 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee 2044 ADVERTISED_Pause);
2045 break;
356e2385 2046
c18487ee 2047 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2048 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee 2049 ADVERTISED_Pause);
2050 break;
356e2385 2051
c18487ee 2052 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2053 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2054 break;
356e2385 2055
c18487ee 2056 default:
34f80b04 2057 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee 2058 ADVERTISED_Pause);
2059 break;
2060 }
2061}
f1410647 2062
c18487ee 2063static void bnx2x_link_report(struct bnx2x *bp)
2064{
2065 if (bp->link_vars.link_up) {
2066 if (bp->state == BNX2X_STATE_OPEN)
2067 netif_carrier_on(bp->dev);
2068 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2069
c18487ee 2070 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2071
c18487ee 2072 if (bp->link_vars.duplex == DUPLEX_FULL)
2073 printk("full duplex");
2074 else
2075 printk("half duplex");
f1410647 2076
c0700f90 2077 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2078 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2079 printk(", receive ");
356e2385 2080 if (bp->link_vars.flow_ctrl &
2081 BNX2X_FLOW_CTRL_TX)
c18487ee 2082 printk("& transmit ");
2083 } else {
2084 printk(", transmit ");
2085 }
2086 printk("flow control ON");
2087 }
2088 printk("\n");
f1410647 2089
c18487ee 2090 } else { /* link_down */
2091 netif_carrier_off(bp->dev);
2092 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2093 }
c18487ee 2094}
2095
b5bf9068 2096static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2097{
19680c48 2098 if (!BP_NOMCP(bp)) {
2099 u8 rc;
a2fbb9ea 2100
19680c48 2101 /* Initialize link parameters structure variables */
8c99e7b0 2102 /* It is recommended to turn off RX FC for jumbo frames
2103 for better performance */
2104 if (IS_E1HMF(bp))
c0700f90 2105 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2106 else if (bp->dev->mtu > 5000)
c0700f90 2107 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2108 else
c0700f90 2109 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2110
4a37fb66 2111 bnx2x_acquire_phy_lock(bp);
b5bf9068 2112
2113 if (load_mode == LOAD_DIAG)
2114 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2115
19680c48 2116 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2117
4a37fb66 2118 bnx2x_release_phy_lock(bp);
a2fbb9ea 2119
3c96c68b 2120 bnx2x_calc_fc_adv(bp);
2121
b5bf9068 2122 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2123 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2124 bnx2x_link_report(bp);
b5bf9068 2125 }
34f80b04 2126
19680c48 2127 return rc;
2128 }
f5372251 2129 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2130 return -EINVAL;
a2fbb9ea 2131}
2132
c18487ee 2133static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2134{
19680c48 2135 if (!BP_NOMCP(bp)) {
4a37fb66 2136 bnx2x_acquire_phy_lock(bp);
19680c48 2137 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2138 bnx2x_release_phy_lock(bp);
a2fbb9ea 2139
19680c48 2140 bnx2x_calc_fc_adv(bp);
2141 } else
f5372251 2142 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2143}
a2fbb9ea 2144
c18487ee 2145static void bnx2x__link_reset(struct bnx2x *bp)
2146{
19680c48 2147 if (!BP_NOMCP(bp)) {
4a37fb66 2148 bnx2x_acquire_phy_lock(bp);
589abe3a 2149 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2150 bnx2x_release_phy_lock(bp);
19680c48 2151 } else
f5372251 2152 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2153}
a2fbb9ea 2154
c18487ee 2155static u8 bnx2x_link_test(struct bnx2x *bp)
2156{
2157 u8 rc;
a2fbb9ea 2158
4a37fb66 2159 bnx2x_acquire_phy_lock(bp);
c18487ee 2160 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2161 bnx2x_release_phy_lock(bp);
a2fbb9ea 2162
c18487ee 2163 return rc;
2164}
a2fbb9ea 2165
8a1c38d1 2166static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2167{
8a1c38d1 2168 u32 r_param = bp->link_vars.line_speed / 8;
2169 u32 fair_periodic_timeout_usec;
2170 u32 t_fair;
34f80b04 2171
8a1c38d1 2172 memset(&(bp->cmng.rs_vars), 0,
2173 sizeof(struct rate_shaping_vars_per_port));
2174 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2175
8a1c38d1 2176 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2177 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2178
8a1c38d1 2179 /* this is the threshold below which no timer arming will occur;
2180 the 1.25 coefficient makes the threshold a little bigger
2181 than the real time, to compensate for timer inaccuracy */
2182 bp->cmng.rs_vars.rs_threshold =
34f80b04 2183 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2184
8a1c38d1 2185 /* resolution of fairness timer */
2186 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2187 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2188 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2189
8a1c38d1 2190 /* this is the threshold below which we won't arm the timer anymore */
2191 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2192
8a1c38d1 2193 /* we multiply by 1e3/8 to get bytes/msec.
2194 We don't want the credits to pass a credit
2195 of the t_fair*FAIR_MEM (algorithm resolution) */
2196 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2197 /* since each tick is 4 usec */
2198 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04 2199}
2200
8a1c38d1 2201static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04 2202{
2203 struct rate_shaping_vars_per_vn m_rs_vn;
2204 struct fairness_vars_per_vn m_fair_vn;
2205 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2206 u16 vn_min_rate, vn_max_rate;
2207 int i;
2208
2209 /* If function is hidden - set min and max to zeroes */
2210 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2211 vn_min_rate = 0;
2212 vn_max_rate = 0;
2213
2214 } else {
2215 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2216 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2217 /* If fairness is enabled (not all min rates are zeroes) and
34f80b04 2218 if current min rate is zero - set it to 1.
33471629 2219 This is a requirement of the algorithm. */
8a1c38d1 2220 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04 2221 vn_min_rate = DEF_MIN_RATE;
2222 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2223 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2224 }
2225
8a1c38d1 2226 DP(NETIF_MSG_IFUP,
2227 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2228 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04 2229
2230 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2231 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2232
2233 /* global vn counter - maximal Mbps for this vn */
2234 m_rs_vn.vn_counter.rate = vn_max_rate;
2235
2236 /* quota - number of bytes transmitted in this period */
2237 m_rs_vn.vn_counter.quota =
2238 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2239
8a1c38d1 2240 if (bp->vn_weight_sum) {
34f80b04 2241 /* credit for each period of the fairness algorithm:
2242 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1 2243 vn_weight_sum should not be larger than 10000, thus
2244 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2245 than zero */
34f80b04 2246 m_fair_vn.vn_credit_delta =
8a1c38d1 2247 max((u32)(vn_min_rate * (T_FAIR_COEF /
2248 (8 * bp->vn_weight_sum))),
2249 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04 2250 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2251 m_fair_vn.vn_credit_delta);
2252 }
2253
34f80b04 2254 /* Store it to internal memory */
2255 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2256 REG_WR(bp, BAR_XSTRORM_INTMEM +
2257 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2258 ((u32 *)(&m_rs_vn))[i]);
2259
2260 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2261 REG_WR(bp, BAR_XSTRORM_INTMEM +
2262 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2263 ((u32 *)(&m_fair_vn))[i]);
2264}
2265
8a1c38d1 2266
c18487ee 2267/* This function is called upon link interrupt */
2268static void bnx2x_link_attn(struct bnx2x *bp)
2269{
bb2a0f7a 2270 /* Make sure that we are synced with the current statistics */
2271 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2272
c18487ee 2273 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2274
bb2a0f7a 2275 if (bp->link_vars.link_up) {
2276
1c06328c 2277 /* dropless flow control */
2278 if (CHIP_IS_E1H(bp)) {
2279 int port = BP_PORT(bp);
2280 u32 pause_enabled = 0;
2281
2282 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2283 pause_enabled = 1;
2284
2285 REG_WR(bp, BAR_USTRORM_INTMEM +
2286 USTORM_PAUSE_ENABLED_OFFSET(port),
2287 pause_enabled);
2288 }
2289
bb2a0f7a 2290 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2291 struct host_port_stats *pstats;
2292
2293 pstats = bnx2x_sp(bp, port_stats);
2294 /* reset old bmac stats */
2295 memset(&(pstats->mac_stx[0]), 0,
2296 sizeof(struct mac_stx));
2297 }
2298 if ((bp->state == BNX2X_STATE_OPEN) ||
2299 (bp->state == BNX2X_STATE_DISABLED))
2300 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2301 }
2302
c18487ee 2303 /* indicate link status */
2304 bnx2x_link_report(bp);
34f80b04 2305
2306 if (IS_E1HMF(bp)) {
8a1c38d1 2307 int port = BP_PORT(bp);
34f80b04 2308 int func;
8a1c38d1 2309 int vn;
34f80b04 2310
2311 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2312 if (vn == BP_E1HVN(bp))
2313 continue;
2314
8a1c38d1 2315 func = ((vn << 1) | port);
34f80b04 2316
2317 /* Set the attention towards other drivers
2318 on the same port */
2319 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2320 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2321 }
34f80b04 2322
8a1c38d1 2323 if (bp->link_vars.link_up) {
2324 int i;
2325
2326 /* Init rate shaping and fairness contexts */
2327 bnx2x_init_port_minmax(bp);
34f80b04 2328
34f80b04 2329 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1 2330 bnx2x_init_vn_minmax(bp, 2*vn + port);
2331
2332 /* Store it to internal memory */
2333 for (i = 0;
2334 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2335 REG_WR(bp, BAR_XSTRORM_INTMEM +
2336 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2337 ((u32 *)(&bp->cmng))[i]);
2338 }
34f80b04 2339 }
c18487ee 2340}
a2fbb9ea 2341
c18487ee 2342static void bnx2x__link_status_update(struct bnx2x *bp)
2343{
2344 if (bp->state != BNX2X_STATE_OPEN)
2345 return;
a2fbb9ea 2346
c18487ee 2347 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2348
bb2a0f7a 2349 if (bp->link_vars.link_up)
2350 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2351 else
2352 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2353
c18487ee 2354 /* indicate link status */
2355 bnx2x_link_report(bp);
a2fbb9ea 2356}
a2fbb9ea 2357
34f80b04 2358static void bnx2x_pmf_update(struct bnx2x *bp)
2359{
2360 int port = BP_PORT(bp);
2361 u32 val;
2362
2363 bp->port.pmf = 1;
2364 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2365
2366 /* enable nig attention */
2367 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2368 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2369 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a 2370
2371 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04 2372}
2373
c18487ee 2374/* end of Link */
a2fbb9ea 2375
2376/* slow path */
2377
2378/*
2379 * General service functions
2380 */
2381
2382/* the slow path queue is odd since completions arrive on the fastpath ring */
2383static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2384 u32 data_hi, u32 data_lo, int common)
2385{
34f80b04 2386 int func = BP_FUNC(bp);
a2fbb9ea 2387
34f80b04 2388 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2389 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea 2390 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2391 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2392 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2393
2394#ifdef BNX2X_STOP_ON_ERROR
2395 if (unlikely(bp->panic))
2396 return -EIO;
2397#endif
2398
34f80b04 2399 spin_lock_bh(&bp->spq_lock);
a2fbb9ea 2400
2401 if (!bp->spq_left) {
2402 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2403 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea 2404 bnx2x_panic();
2405 return -EBUSY;
2406 }
f1410647 2407
a2fbb9ea 2408 /* CID needs the port number to be encoded in it */
2409 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2410 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2411 HW_CID(bp, cid)));
2412 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2413 if (common)
2414 bp->spq_prod_bd->hdr.type |=
2415 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2416
2417 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2418 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2419
2420 bp->spq_left--;
2421
2422 if (bp->spq_prod_bd == bp->spq_last_bd) {
2423 bp->spq_prod_bd = bp->spq;
2424 bp->spq_prod_idx = 0;
2425 DP(NETIF_MSG_TIMER, "end of spq\n");
2426
2427 } else {
2428 bp->spq_prod_bd++;
2429 bp->spq_prod_idx++;
2430 }
2431
34f80b04 2432 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea 2433 bp->spq_prod_idx);
2434
34f80b04 2435 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea 2436 return 0;
2437}
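/* Single producer for the slow path queue: the BD is filled in place,
 spq_prod_bd wraps back to the ring base after the last BD, and the new
 spq_prod_idx is published to the XSTORM producer offset, all under
 spq_lock. */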
2438
2439/* acquire split MCP access lock register */
4a37fb66 2440static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2441{
a2fbb9ea 2442 u32 i, j, val;
34f80b04 2443 int rc = 0;
a2fbb9ea 2444
2445 might_sleep();
2446 i = 100;
2447 for (j = 0; j < i*10; j++) {
2448 val = (1UL << 31);
2449 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2450 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2451 if (val & (1L << 31))
2452 break;
2453
2454 msleep(5);
2455 }
a2fbb9ea 2456 if (!(val & (1L << 31))) {
19680c48 2457 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea 2458 rc = -EBUSY;
2459 }
2460
2461 return rc;
2462}
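/* The register at GRCBASE_MCP + 0x9c appears to behave as a test-and-set
 lock: bit 31 reads back as set only once the lock is granted, so the
 loop above polls up to 1000 times x 5ms (about 5 seconds); writing 0
 (in bnx2x_release_alr below) drops it. */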
2463
4a37fb66 2464/* release split MCP access lock register */
2465static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2466{
2467 u32 val = 0;
2468
2469 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2470}
2471
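/* bnx2x_update_dsb_idx() returns a bitmask of the default-SB indices
 that changed: bit 0 for the attention block, bits 1-4 for the
 CSTORM/USTORM/XSTORM/TSTORM indices respectively. bnx2x_sp_task()
 only acts on bit 0. */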
2472static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2473{
2474 struct host_def_status_block *def_sb = bp->def_status_blk;
2475 u16 rc = 0;
2476
2477 barrier(); /* status block is written to by the chip */
a2fbb9ea 2478 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2479 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2480 rc |= 1;
2481 }
2482 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2483 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2484 rc |= 2;
2485 }
2486 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2487 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2488 rc |= 4;
2489 }
2490 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2491 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2492 rc |= 8;
2493 }
2494 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2495 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2496 rc |= 16;
2497 }
2498 return rc;
2499}
2500
2501/*
2502 * slow path service functions
2503 */
2504
2505static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2506{
34f80b04 2507 int port = BP_PORT(bp);
5c862848 2508 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2509 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea 2510 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2511 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4 2512 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2513 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2514 u32 aeu_mask;
87942b46 2515 u32 nig_mask = 0;
a2fbb9ea 2516
a2fbb9ea 2517 if (bp->attn_state & asserted)
2518 BNX2X_ERR("IGU ERROR\n");
2519
3fcaf2e5 2520 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2521 aeu_mask = REG_RD(bp, aeu_addr);
2522
a2fbb9ea 2523 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2524 aeu_mask, asserted);
2525 aeu_mask &= ~(asserted & 0xff);
2526 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2527
3fcaf2e5 2528 REG_WR(bp, aeu_addr, aeu_mask);
2529 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2530
3fcaf2e5 2531 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2532 bp->attn_state |= asserted;
3fcaf2e5 2533 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea 2534
2535 if (asserted & ATTN_HARD_WIRED_MASK) {
2536 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2537
a5e9a7cf 2538 bnx2x_acquire_phy_lock(bp);
2539
877e9aa4 2540 /* save nig interrupt mask */
87942b46 2541 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2542 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2543
c18487ee 2544 bnx2x_link_attn(bp);
a2fbb9ea 2545
2546 /* handle unicore attn? */
2547 }
2548 if (asserted & ATTN_SW_TIMER_4_FUNC)
2549 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2550
2551 if (asserted & GPIO_2_FUNC)
2552 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2553
2554 if (asserted & GPIO_3_FUNC)
2555 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2556
2557 if (asserted & GPIO_4_FUNC)
2558 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2559
2560 if (port == 0) {
2561 if (asserted & ATTN_GENERAL_ATTN_1) {
2562 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2563 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2564 }
2565 if (asserted & ATTN_GENERAL_ATTN_2) {
2566 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2567 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2568 }
2569 if (asserted & ATTN_GENERAL_ATTN_3) {
2570 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2571 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2572 }
2573 } else {
2574 if (asserted & ATTN_GENERAL_ATTN_4) {
2575 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2576 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2577 }
2578 if (asserted & ATTN_GENERAL_ATTN_5) {
2579 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2580 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2581 }
2582 if (asserted & ATTN_GENERAL_ATTN_6) {
2583 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2584 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2585 }
2586 }
2587
2588 } /* if hardwired */
2589
5c862848 2590 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2591 asserted, hc_addr);
2592 REG_WR(bp, hc_addr, asserted);
a2fbb9ea 2593
2594 /* now set back the mask */
a5e9a7cf 2595 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2596 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf 2597 bnx2x_release_phy_lock(bp);
2598 }
a2fbb9ea 2599}
2600
fd4ef40d 2601static inline void bnx2x_fan_failure(struct bnx2x *bp)
2602{
2603 int port = BP_PORT(bp);
2604
2605 /* mark the failure */
2606 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2607 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2608 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2609 bp->link_params.ext_phy_config);
2610
2611 /* log the failure */
2612 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2613 " the driver to shutdown the card to prevent permanent"
2614 " damage. Please contact Dell Support for assistance\n",
2615 bp->dev->name);
2616}
877e9aa4 2617static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2618{
34f80b04 2619 int port = BP_PORT(bp);
877e9aa4 2620 int reg_offset;
2621 u32 val;
2622
34f80b04 2623 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2624 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2625
34f80b04 2626 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4 2627
2628 val = REG_RD(bp, reg_offset);
2629 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2630 REG_WR(bp, reg_offset, val);
2631
2632 BNX2X_ERR("SPIO5 hw attention\n");
2633
fd4ef40d 2634 /* Fan failure attention */
35b19ba5 2635 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2636 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2637 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2638 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2639 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d 2640 /* The PHY reset is controlled by GPIO 1 */
2641 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2642 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2643 break;
2644
2645 default:
2646 break;
2647 }
fd4ef40d 2648 bnx2x_fan_failure(bp);
877e9aa4 2649 }
34f80b04 2650
589abe3a 2651 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2652 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2653 bnx2x_acquire_phy_lock(bp);
2654 bnx2x_handle_module_detect_int(&bp->link_params);
2655 bnx2x_release_phy_lock(bp);
2656 }
2657
34f80b04 2658 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2659
2660 val = REG_RD(bp, reg_offset);
2661 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2662 REG_WR(bp, reg_offset, val);
2663
2664 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2665 (attn & HW_INTERRUT_ASSERT_SET_0));
2666 bnx2x_panic();
2667 }
877e9aa4 2668}
2669
2670static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2671{
2672 u32 val;
2673
0626b899 2674 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4 2675
2676 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2677 BNX2X_ERR("DB hw attention 0x%x\n", val);
2678 /* DORQ discard attention */
2679 if (val & 0x2)
2680 BNX2X_ERR("FATAL error from DORQ\n");
2681 }
34f80b04 2682
2683 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2684
2685 int port = BP_PORT(bp);
2686 int reg_offset;
2687
2688 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2689 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2690
2691 val = REG_RD(bp, reg_offset);
2692 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2693 REG_WR(bp, reg_offset, val);
2694
2695 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2696 (attn & HW_INTERRUT_ASSERT_SET_1));
2697 bnx2x_panic();
2698 }
877e9aa4 2699}
2700
2701static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2702{
2703 u32 val;
2704
2705 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2706
2707 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2708 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2709 /* CFC error attention */
2710 if (val & 0x2)
2711 BNX2X_ERR("FATAL error from CFC\n");
2712 }
2713
2714 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2715
2716 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2717 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2718 /* RQ_USDMDP_FIFO_OVERFLOW */
2719 if (val & 0x18000)
2720 BNX2X_ERR("FATAL error from PXP\n");
2721 }
34f80b04 2722
2723 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2724
2725 int port = BP_PORT(bp);
2726 int reg_offset;
2727
2728 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2729 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2730
2731 val = REG_RD(bp, reg_offset);
2732 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2733 REG_WR(bp, reg_offset, val);
2734
2735 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2736 (attn & HW_INTERRUT_ASSERT_SET_2));
2737 bnx2x_panic();
2738 }
877e9aa4 2739}
2740
2741static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2742{
34f80b04 2743 u32 val;
2744
877e9aa4 2745 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2746
34f80b04 2747 if (attn & BNX2X_PMF_LINK_ASSERT) {
2748 int func = BP_FUNC(bp);
2749
2750 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2751 bnx2x__link_status_update(bp);
2752 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2753 DRV_STATUS_PMF)
2754 bnx2x_pmf_update(bp);
2755
2756 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4 2757
2758 BNX2X_ERR("MC assert!\n");
2759 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2760 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2761 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2762 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2763 bnx2x_panic();
2764
2765 } else if (attn & BNX2X_MCP_ASSERT) {
2766
2767 BNX2X_ERR("MCP assert!\n");
2768 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2769 bnx2x_fw_dump(bp);
877e9aa4 2770
2771 } else
2772 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2773 }
2774
2775 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04 2776 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2777 if (attn & BNX2X_GRC_TIMEOUT) {
2778 val = CHIP_IS_E1H(bp) ?
2779 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2780 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2781 }
2782 if (attn & BNX2X_GRC_RSV) {
2783 val = CHIP_IS_E1H(bp) ?
2784 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2785 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2786 }
877e9aa4 2787 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4 2788 }
2789}
2790
2791static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2792{
a2fbb9ea 2793 struct attn_route attn;
2794 struct attn_route group_mask;
34f80b04 2795 int port = BP_PORT(bp);
877e9aa4 2796 int index;
a2fbb9ea 2797 u32 reg_addr;
2798 u32 val;
3fcaf2e5 2799 u32 aeu_mask;
a2fbb9ea 2800
2801 /* need to take HW lock because MCP or other port might also
2802 try to handle this event */
4a37fb66 2803 bnx2x_acquire_alr(bp);
a2fbb9ea 2804
2805 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2806 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2807 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2808 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04 2809 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2810 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea 2811
2812 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2813 if (deasserted & (1 << index)) {
2814 group_mask = bp->attn_group[index];
2815
34f80b04 2816 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2817 index, group_mask.sig[0], group_mask.sig[1],
2818 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2819
877e9aa4 2820 bnx2x_attn_int_deasserted3(bp,
2821 attn.sig[3] & group_mask.sig[3]);
2822 bnx2x_attn_int_deasserted1(bp,
2823 attn.sig[1] & group_mask.sig[1]);
2824 bnx2x_attn_int_deasserted2(bp,
2825 attn.sig[2] & group_mask.sig[2]);
2826 bnx2x_attn_int_deasserted0(bp,
2827 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2828
a2fbb9ea 2829 if ((attn.sig[0] & group_mask.sig[0] &
2830 HW_PRTY_ASSERT_SET_0) ||
2831 (attn.sig[1] & group_mask.sig[1] &
2832 HW_PRTY_ASSERT_SET_1) ||
2833 (attn.sig[2] & group_mask.sig[2] &
2834 HW_PRTY_ASSERT_SET_2))
6378c025 2835 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea 2836 }
2837 }
2838
4a37fb66 2839 bnx2x_release_alr(bp);
a2fbb9ea 2840
5c862848 2841 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea 2842
2843 val = ~deasserted;
3fcaf2e5 2844 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2845 val, reg_addr);
5c862848 2846 REG_WR(bp, reg_addr, val);
a2fbb9ea 2847
a2fbb9ea 2848 if (~bp->attn_state & deasserted)
3fcaf2e5 2849 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea 2850
2851 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2852 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2853
3fcaf2e5 2854 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2855 aeu_mask = REG_RD(bp, reg_addr);
2856
2857 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2858 aeu_mask, deasserted);
2859 aeu_mask |= (deasserted & 0xff);
2860 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2861
3fcaf2e5 2862 REG_WR(bp, reg_addr, aeu_mask);
2863 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2864
2865 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2866 bp->attn_state &= ~deasserted;
2867 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2868}
2869
2870static void bnx2x_attn_int(struct bnx2x *bp)
2871{
2872 /* read local copy of bits */
68d59484 2873 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2874 attn_bits);
2875 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2876 attn_bits_ack);
a2fbb9ea 2877 u32 attn_state = bp->attn_state;
2878
2879 /* look for changed bits */
2880 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2881 u32 deasserted = ~attn_bits & attn_ack & attn_state;
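/* A bit is newly asserted when set in attn_bits but not yet in
 attn_ack or attn_state; it has deasserted when it cleared in attn_bits
 while still acknowledged and tracked in attn_state. */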
2882
2883 DP(NETIF_MSG_HW,
2884 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2885 attn_bits, attn_ack, asserted, deasserted);
2886
2887 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2888 BNX2X_ERR("BAD attention state\n");
a2fbb9ea 2889
2890 /* handle bits that were raised */
2891 if (asserted)
2892 bnx2x_attn_int_asserted(bp, asserted);
2893
2894 if (deasserted)
2895 bnx2x_attn_int_deasserted(bp, deasserted);
2896}
2897
2898static void bnx2x_sp_task(struct work_struct *work)
2899{
1cf167f2 2900 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea 2901 u16 status;
2902
34f80b04 2903
a2fbb9ea 2904 /* Return here if interrupt is disabled */
2905 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2906 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea 2907 return;
2908 }
2909
2910 status = bnx2x_update_dsb_idx(bp);
34f80b04 2911/* if (status == 0) */
2912/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2913
3196a88a 2914 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2915
877e9aa4 2916 /* HW attentions */
2917 if (status & 0x1)
a2fbb9ea 2918 bnx2x_attn_int(bp);
a2fbb9ea 2919
68d59484 2920 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea 2921 IGU_INT_NOP, 1);
2922 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2923 IGU_INT_NOP, 1);
2924 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2925 IGU_INT_NOP, 1);
2926 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2927 IGU_INT_NOP, 1);
2928 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2929 IGU_INT_ENABLE, 1);
877e9aa4 2930
a2fbb9ea 2931}
2932
2933static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2934{
2935 struct net_device *dev = dev_instance;
2936 struct bnx2x *bp = netdev_priv(dev);
2937
2938 /* Return here if interrupt is disabled */
2939 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2940 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea 2941 return IRQ_HANDLED;
2942 }
2943
8d9c5f34 2944 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea 2945
2946#ifdef BNX2X_STOP_ON_ERROR
2947 if (unlikely(bp->panic))
2948 return IRQ_HANDLED;
2949#endif
2950
1cf167f2 2951 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea 2952
2953 return IRQ_HANDLED;
2954}
2955
2956/* end of slow path */
2957
2958/* Statistics */
2959
2960/****************************************************************************
2961* Macros
2962****************************************************************************/
2963
a2fbb9ea 2964/* sum[hi:lo] += add[hi:lo] */
2965#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2966 do { \
2967 s_lo += a_lo; \
f5ba6772 2968 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea 2969 } while (0)
2970
2971/* difference = minuend - subtrahend */
2972#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2973 do { \
bb2a0f7a 2974 if (m_lo < s_lo) { \
2975 /* underflow */ \
a2fbb9ea 2976 d_hi = m_hi - s_hi; \
bb2a0f7a 2977 if (d_hi > 0) { \
6378c025 2978 /* we can 'loan' 1 */ \
a2fbb9ea 2979 d_hi--; \
2980 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2981 } else { \
6378c025 2982 /* m_hi <= s_hi */ \
a2fbb9ea 2983 d_hi = 0; \
2984 d_lo = 0; \
2985 } \
bb2a0f7a 2986 } else { \
2987 /* m_lo >= s_lo */ \
a2fbb9ea 2988 if (m_hi < s_hi) { \
bb2a0f7a 2989 d_hi = 0; \
2990 d_lo = 0; \
2991 } else { \
6378c025 2992 /* m_hi >= s_hi */ \
bb2a0f7a 2993 d_hi = m_hi - s_hi; \
2994 d_lo = m_lo - s_lo; \
a2fbb9ea 2995 } \
2996 } \
2997 } while (0)
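/* When the 32-bit low words would underflow, DIFF_64 borrows 1 from the
 high word and forms the low word as m_lo + (UINT_MAX - s_lo) + 1; if
 the whole 64-bit difference would be negative it clamps the result
 to 0. */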
2998
bb2a0f7a 2999#define UPDATE_STAT64(s, t) \
a2fbb9ea 3000 do { \
bb2a0f7a 3001 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3002 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3003 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3004 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3005 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3006 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea 3007 } while (0)
3008
bb2a0f7a 3009#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3010 do { \
bb2a0f7a 3011 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3012 diff.lo, new->s##_lo, old->s##_lo); \
3013 ADD_64(estats->t##_hi, diff.hi, \
3014 estats->t##_lo, diff.lo); \
a2fbb9ea 3015 } while (0)
3016
3017/* sum[hi:lo] += add */
3018#define ADD_EXTEND_64(s_hi, s_lo, a) \
3019 do { \
3020 s_lo += a; \
3021 s_hi += (s_lo < a) ? 1 : 0; \
3022 } while (0)
3023
bb2a0f7a 3024#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3025 do { \
bb2a0f7a 3026 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3027 pstats->mac_stx[1].s##_lo, \
3028 new->s); \
a2fbb9ea 3029 } while (0)
3030
bb2a0f7a 3031#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3032 do { \
4781bfad 3033 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3034 old_tclient->s = tclient->s; \
de832a55 3035 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3036 } while (0)
3037
3038#define UPDATE_EXTEND_USTAT(s, t) \
3039 do { \
3040 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3041 old_uclient->s = uclient->s; \
3042 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a 3043 } while (0)
3044
3045#define UPDATE_EXTEND_XSTAT(s, t) \
3046 do { \
4781bfad 3047 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3048 old_xclient->s = xclient->s; \
de832a55 3049 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3050 } while (0)
3051
3052/* minuend -= subtrahend */
3053#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3054 do { \
3055 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3056 } while (0)
3057
3058/* minuend[hi:lo] -= subtrahend */
3059#define SUB_EXTEND_64(m_hi, m_lo, s) \
3060 do { \
3061 SUB_64(m_hi, 0, m_lo, s); \
3062 } while (0)
3063
3064#define SUB_EXTEND_USTAT(s, t) \
3065 do { \
3066 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3067 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea 3068 } while (0)
3069
3070/*
3071 * General service functions
3072 */
3073
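/* On 64-bit kernels bnx2x_hilo() folds the {hi,lo} pair into a single
 long via HILO_U64(); 32-bit builds can only return the low 32 bits, so
 the high word is dropped there. */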
3074static inline long bnx2x_hilo(u32 *hiref)
3075{
3076 u32 lo = *(hiref + 1);
3077#if (BITS_PER_LONG == 64)
3078 u32 hi = *hiref;
3079
3080 return HILO_U64(hi, lo);
3081#else
3082 return lo;
3083#endif
3084}
3085
3086/*
3087 * Init service functions
3088 */
3089
bb2a0f7a 3090static void bnx2x_storm_stats_post(struct bnx2x *bp)
3091{
3092 if (!bp->stats_pending) {
3093 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3094 int i, rc;
bb2a0f7a 3095
3096 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3097 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55 3098 for_each_queue(bp, i)
3099 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a 3100
3101 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3102 ((u32 *)&ramrod_data)[1],
3103 ((u32 *)&ramrod_data)[0], 0);
3104 if (rc == 0) {
3105 /* stats ramrod has its own slot on the spq */
3106 bp->spq_left++;
3107 bp->stats_pending = 1;
3108 }
3109 }
3110}
3111
3112static void bnx2x_stats_init(struct bnx2x *bp)
3113{
3114 int port = BP_PORT(bp);
de832a55 3115 int i;
bb2a0f7a 3116
de832a55 3117 bp->stats_pending = 0;
bb2a0f7a 3118 bp->executer_idx = 0;
3119 bp->stats_counter = 0;
3120
3121 /* port stats */
3122 if (!BP_NOMCP(bp))
3123 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3124 else
3125 bp->port.port_stx = 0;
3126 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3127
3128 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3129 bp->port.old_nig_stats.brb_discard =
3130 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3 3131 bp->port.old_nig_stats.brb_truncate =
3132 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a 3133 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3134 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3135 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3136 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3137
3138 /* function stats */
de832a55 3139 for_each_queue(bp, i) {
3140 struct bnx2x_fastpath *fp = &bp->fp[i];
3141
3142 memset(&fp->old_tclient, 0,
3143 sizeof(struct tstorm_per_client_stats));
3144 memset(&fp->old_uclient, 0,
3145 sizeof(struct ustorm_per_client_stats));
3146 memset(&fp->old_xclient, 0,
3147 sizeof(struct xstorm_per_client_stats));
3148 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3149 }
3150
bb2a0f7a 3151 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
bb2a0f7a 3152 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3153
3154 bp->stats_state = STATS_STATE_DISABLED;
3155 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3156 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3157}
3158
3159static void bnx2x_hw_stats_post(struct bnx2x *bp)
3160{
3161 struct dmae_command *dmae = &bp->stats_dmae;
3162 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3163
3164 *stats_comp = DMAE_COMP_VAL;
de832a55 3165 if (CHIP_REV_IS_SLOW(bp))
3166 return;
bb2a0f7a 3167
3168 /* loader */
3169 if (bp->executer_idx) {
3170 int loader_idx = PMF_DMAE_C(bp);
3171
3172 memset(dmae, 0, sizeof(struct dmae_command));
3173
3174 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3175 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3176 DMAE_CMD_DST_RESET |
3177#ifdef __BIG_ENDIAN
3178 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3179#else
3180 DMAE_CMD_ENDIANITY_DW_SWAP |
3181#endif
3182 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3183 DMAE_CMD_PORT_0) |
3184 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3185 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3186 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3187 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3188 sizeof(struct dmae_command) *
3189 (loader_idx + 1)) >> 2;
3190 dmae->dst_addr_hi = 0;
3191 dmae->len = sizeof(struct dmae_command) >> 2;
3192 if (CHIP_IS_E1(bp))
3193 dmae->len--;
3194 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3195 dmae->comp_addr_hi = 0;
3196 dmae->comp_val = 1;
3197
3198 *stats_comp = 0;
3199 bnx2x_post_dmae(bp, dmae, loader_idx);
3200
3201 } else if (bp->func_stx) {
3202 *stats_comp = 0;
3203 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3204 }
3205}
3206
3207static int bnx2x_stats_comp(struct bnx2x *bp)
3208{
3209 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3210 int cnt = 10;
3211
3212 might_sleep();
3213 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a 3214 if (!cnt) {
3215 BNX2X_ERR("timeout waiting for stats finished\n");
3216 break;
3217 }
3218 cnt--;
12469401 3219 msleep(1);
bb2a0f7a 3220 }
3221 return 1;
3222}
3223
3224/*
3225 * Statistics service functions
3226 */
3227
3228static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3229{
3230 struct dmae_command *dmae;
3231 u32 opcode;
3232 int loader_idx = PMF_DMAE_C(bp);
3233 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3234
3235 /* sanity */
3236 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3237 BNX2X_ERR("BUG!\n");
3238 return;
3239 }
3240
3241 bp->executer_idx = 0;
3242
3243 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3244 DMAE_CMD_C_ENABLE |
3245 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3246#ifdef __BIG_ENDIAN
3247 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3248#else
3249 DMAE_CMD_ENDIANITY_DW_SWAP |
3250#endif
3251 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3252 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3253
3254 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3255 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3256 dmae->src_addr_lo = bp->port.port_stx >> 2;
3257 dmae->src_addr_hi = 0;
3258 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3259 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3260 dmae->len = DMAE_LEN32_RD_MAX;
3261 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3262 dmae->comp_addr_hi = 0;
3263 dmae->comp_val = 1;
3264
3265 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3266 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3267 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3268 dmae->src_addr_hi = 0;
7a9b2557 3269 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3270 DMAE_LEN32_RD_MAX * 4);
3271 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3272 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a 3273 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3274 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3275 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3276 dmae->comp_val = DMAE_COMP_VAL;
3277
3278 *stats_comp = 0;
3279 bnx2x_hw_stats_post(bp);
3280 bnx2x_stats_comp(bp);
3281}
3282
3283static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea 3284{
3285 struct dmae_command *dmae;
34f80b04 3286 int port = BP_PORT(bp);
bb2a0f7a 3287 int vn = BP_E1HVN(bp);
a2fbb9ea 3288 u32 opcode;
bb2a0f7a 3289 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3290 u32 mac_addr;
bb2a0f7a 3291 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3292
3293 /* sanity */
3294 if (!bp->link_vars.link_up || !bp->port.pmf) {
3295 BNX2X_ERR("BUG!\n");
3296 return;
3297 }
a2fbb9ea 3298
3299 bp->executer_idx = 0;
bb2a0f7a 3300
3301 /* MCP */
3302 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3303 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3304 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3305#ifdef __BIG_ENDIAN
bb2a0f7a 3306 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3307#else
bb2a0f7a 3308 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3309#endif
bb2a0f7a 3310 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3311 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3312
bb2a0f7a 3313 if (bp->port.port_stx) {
a2fbb9ea 3314
3315 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3316 dmae->opcode = opcode;
bb2a0f7a 3317 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3318 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3319 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3320 dmae->dst_addr_hi = 0;
bb2a0f7a 3321 dmae->len = sizeof(struct host_port_stats) >> 2;
3322 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3323 dmae->comp_addr_hi = 0;
3324 dmae->comp_val = 1;
a2fbb9ea 3325 }
3326
bb2a0f7a 3327 if (bp->func_stx) {
3328
3329 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3330 dmae->opcode = opcode;
3331 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3332 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3333 dmae->dst_addr_lo = bp->func_stx >> 2;
3334 dmae->dst_addr_hi = 0;
3335 dmae->len = sizeof(struct host_func_stats) >> 2;
3336 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3337 dmae->comp_addr_hi = 0;
3338 dmae->comp_val = 1;
a2fbb9ea 3339 }
3340
bb2a0f7a 3341 /* MAC */
a2fbb9ea 3342 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3343 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3344 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3345#ifdef __BIG_ENDIAN
3346 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3347#else
3348 DMAE_CMD_ENDIANITY_DW_SWAP |
3349#endif
bb2a0f7a 3350 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3351 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3352
c18487ee 3353 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea 3354
3355 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3356 NIG_REG_INGRESS_BMAC0_MEM);
3357
3358 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3359 BIGMAC_REGISTER_TX_STAT_GTBYT */
3360 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3361 dmae->opcode = opcode;
3362 dmae->src_addr_lo = (mac_addr +
3363 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3364 dmae->src_addr_hi = 0;
3365 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3366 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3367 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3368 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3369 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3370 dmae->comp_addr_hi = 0;
3371 dmae->comp_val = 1;
3372
3373 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3374 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3375 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3376 dmae->opcode = opcode;
3377 dmae->src_addr_lo = (mac_addr +
3378 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3379 dmae->src_addr_hi = 0;
3380 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3381 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3382 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3383 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3384 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3385 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3386 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3387 dmae->comp_addr_hi = 0;
3388 dmae->comp_val = 1;
3389
c18487ee 3390 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea 3391
3392 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3393
3394 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3395 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3396 dmae->opcode = opcode;
3397 dmae->src_addr_lo = (mac_addr +
3398 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3399 dmae->src_addr_hi = 0;
3400 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3401 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3402 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3403 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3404 dmae->comp_addr_hi = 0;
3405 dmae->comp_val = 1;
3406
3407 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3408 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3409 dmae->opcode = opcode;
3410 dmae->src_addr_lo = (mac_addr +
3411 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3412 dmae->src_addr_hi = 0;
3413 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3414 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3415 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3416 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3417 dmae->len = 1;
3418 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3419 dmae->comp_addr_hi = 0;
3420 dmae->comp_val = 1;
3421
3422 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3423 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3424 dmae->opcode = opcode;
3425 dmae->src_addr_lo = (mac_addr +
3426 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3427 dmae->src_addr_hi = 0;
3428 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3429 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3430 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3431 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3432 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3433 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3434 dmae->comp_addr_hi = 0;
3435 dmae->comp_val = 1;
3436 }
3437
3438 /* NIG */
bb2a0f7a
YG
3439 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3440 dmae->opcode = opcode;
3441 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3442 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3443 dmae->src_addr_hi = 0;
3444 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3445 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3446 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3447 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3448 dmae->comp_addr_hi = 0;
3449 dmae->comp_val = 1;
3450
3451 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3452 dmae->opcode = opcode;
3453 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3454 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3455 dmae->src_addr_hi = 0;
3456 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3457 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3458 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3459 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3460 dmae->len = (2*sizeof(u32)) >> 2;
3461 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3462 dmae->comp_addr_hi = 0;
3463 dmae->comp_val = 1;
3464
a2fbb9ea
ET
3465 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3466 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3467 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3468 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3469#ifdef __BIG_ENDIAN
3470 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3471#else
3472 DMAE_CMD_ENDIANITY_DW_SWAP |
3473#endif
bb2a0f7a
YG
3474 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3475 (vn << DMAE_CMD_E1HVN_SHIFT));
3476 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3477 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3478 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3479 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3480 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3481 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3482 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3483 dmae->len = (2*sizeof(u32)) >> 2;
3484 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3485 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3486 dmae->comp_val = DMAE_COMP_VAL;
3487
3488 *stats_comp = 0;
a2fbb9ea
ET
3489}
3490
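/* Note on the completion convention used by the DMAE chains built above:
 * every command but the last completes to GRC with comp_val 1, which
 * writes the "go" register of the next loader entry and so strings the
 * commands together; only the final command completes to the host
 * stats_comp word with DMAE_COMP_VAL, which is what bnx2x_stats_comp()
 * polls for.
 */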
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
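	/* The UPDATE_STAT64 macros below (see bnx2x.h) turn the raw MAC
	 * counters just DMAE'd into "new" into running 64-bit totals in
	 * pstats->mac_stx[1]: roughly, they take the delta between the
	 * new snapshot and the previously latched value (using the local
	 * "diff" pair) and accumulate it with 64-bit carry handling, so
	 * narrow hardware counters can wrap without losing counts.
	 */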

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

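/* Storm (firmware) statistics are pulled from the eth_stats_query ramrod
 * buffer.  Each storm stamps its per-client block with a stats_counter
 * sequence number; the checks below accept a block only when it carries
 * the sequence the driver expects, otherwise the whole update is retried
 * on the next statistics tick.
 */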
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
		qstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
		qstats->total_bytes_received_lo =
		qstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

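/* bnx2x_hilo() (defined in bnx2x.h) collapses one of the {_hi, _lo}
 * 32-bit counter pairs kept in bnx2x_eth_stats into a single unsigned
 * long, so the 64-bit driver counters can be exported through the flat
 * struct net_device_stats fields filled in below.
 */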
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

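/* The table above is a flat state machine: the current state and the
 * incoming event select both the handler to run and the next state.
 * For example, an UPDATE event in STATS_STATE_ENABLED runs
 * bnx2x_stats_update() and stays in ENABLED, while the same event in
 * DISABLED is ignored via bnx2x_stats_do_nothing().
 */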
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

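/* The default status block is the slow-path counterpart of the per-queue
 * status blocks above: besides per-storm index sections it also covers
 * attentions: the per-group AEU masks are cached in bp->attn_group and
 * the ATTN section address is handed to the HC before the per-storm
 * views are set up.
 */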
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

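/* Coalescing note: the HC timeout values written below are presumably in
 * 12-usec units, hence the ticks/12 conversion; when the requested value
 * is below 12 usec the result is 0 and the matching HC_DISABLE flag is
 * set for that index instead.
 */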
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

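/* Rx ring layout note: the BD, SGE and CQE rings built below are arrays
 * of page-sized chunks whose final elements are "next page" pointers
 * chaining the pages into a ring, which is why the seeding loops index
 * the last one or two entries of each page (RX_DESC_CNT * i - 2 and
 * friends) rather than a plain page boundary.
 */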
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * This will generate an interrupt (to the TSTORM), so it
		 * must only be done after the chip is initialized.
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

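/* The slow-path (SP) queue set up below carries ramrods (configuration
 * commands) rather than traffic; only its page base and producer index
 * are published to XSTORM internal memory, since the firmware consumes
 * the queue on its own.
 */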
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = fp->sb_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

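/* RSS indirection: each of the TSTORM_INDIRECTION_TABLE_SIZE slots is
 * filled with a client id, spreading the hash buckets round-robin over
 * the Rx queues (bp->fp->cl_id + (i % bp->num_rx_queues)); with RSS
 * disabled the table is left untouched and everything lands on queue 0.
 */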
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}

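/* Per-client Tstorm configuration: MTU, VLAN stripping and SGE limits
 * are written per client; max_sges_for_packet below appears to be the
 * page-aligned MTU expressed in SGE pages, rounded up to a whole number
 * of PAGES_PER_SGE chunks.
 */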
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

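/* Rx-mode filtering: "mask" below is this function's bit in the per-port
 * filter config (1 << BP_L_ID(bp)), so each mode simply sets or clears
 * the drop-all/accept-all bits for unicast, multicast and broadcast on
 * behalf of this function only.
 */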
a2fbb9ea
ET
4798static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4799{
a2fbb9ea 4800 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4801 int mode = bp->rx_mode;
4802 int mask = (1 << BP_L_ID(bp));
4803 int func = BP_FUNC(bp);
a2fbb9ea
ET
4804 int i;
4805
3196a88a 4806 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
4807
4808 switch (mode) {
4809 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4810 tstorm_mac_filter.ucast_drop_all = mask;
4811 tstorm_mac_filter.mcast_drop_all = mask;
4812 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 4813 break;
356e2385 4814
a2fbb9ea 4815 case BNX2X_RX_MODE_NORMAL:
34f80b04 4816 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4817 break;
356e2385 4818
a2fbb9ea 4819 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4820 tstorm_mac_filter.mcast_accept_all = mask;
4821 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4822 break;
356e2385 4823
a2fbb9ea 4824 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4825 tstorm_mac_filter.ucast_accept_all = mask;
4826 tstorm_mac_filter.mcast_accept_all = mask;
4827 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4828 break;
356e2385 4829
a2fbb9ea 4830 default:
34f80b04
EG
4831 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4832 break;
a2fbb9ea
ET
4833 }
4834
4835 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4836 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4837 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4838 ((u32 *)&tstorm_mac_filter)[i]);
4839
34f80b04 4840/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4841 ((u32 *)&tstorm_mac_filter)[i]); */
4842 }
a2fbb9ea 4843
49d66772
ET
4844 if (mode != BNX2X_RX_MODE_NONE)
4845 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4846}
4847
471de716
EG
4848static void bnx2x_init_internal_common(struct bnx2x *bp)
4849{
4850 int i;
4851
3cdf1db7
YG
4852 if (bp->flags & TPA_ENABLE_FLAG) {
4853 struct tstorm_eth_tpa_exist tpa = {0};
4854
4855 tpa.tpa_exist = 1;
4856
4857 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4858 ((u32 *)&tpa)[0]);
4859 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4860 ((u32 *)&tpa)[1]);
4861 }
4862
471de716
EG
4863 /* Zero this manually as its initialization is
4864 currently missing in the initTool */
4865 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4866 REG_WR(bp, BAR_USTRORM_INTMEM +
4867 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4868}
4869
4870static void bnx2x_init_internal_port(struct bnx2x *bp)
4871{
4872 int port = BP_PORT(bp);
4873
4874 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4875 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4876 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4877 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4878}
4879
8a1c38d1
EG
4880/* Calculates the sum of vn_min_rates.
4881 It's needed for further normalizing of the min_rates.
4882 Returns:
4883 sum of vn_min_rates.
4884 or
4885 0 - if all the min_rates are 0.
4886 In the later case fainess algorithm should be deactivated.
4887 If not all min_rates are zero then those that are zeroes will be set to 1.
4888 */
4889static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4890{
4891 int all_zero = 1;
4892 int port = BP_PORT(bp);
4893 int vn;
4894
4895 bp->vn_weight_sum = 0;
4896 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4897 int func = 2*vn + port;
4898 u32 vn_cfg =
4899 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4900 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4901 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4902
4903 /* Skip hidden vns */
4904 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4905 continue;
4906
4907 /* If min rate is zero - set it to 1 */
4908 if (!vn_min_rate)
4909 vn_min_rate = DEF_MIN_RATE;
4910 else
4911 all_zero = 0;
4912
4913 bp->vn_weight_sum += vn_min_rate;
4914 }
4915
4916 /* ... only if all min rates are zeros - disable fairness */
4917 if (all_zero)
4918 bp->vn_weight_sum = 0;
4919}
4920
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}
	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
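	/* i.e. min(8, MAX_SKB_FRAGS) SGE pages, clamped to the 16-bit field
	   the ustorm expects; with 4K pages and PAGES_PER_SGE == 1 (an
	   assumption, both depend on the build configuration) that would be
	   8 * 4096 = 32768 bytes */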
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

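	/* The thresholds below appear to be in ring-entry units (an
	 * assumption from their magnitudes): when free BDs/CQEs fall under
	 * *_thr_low the FW requests a pause frame instead of dropping, and
	 * releases it again above *_thr_high; the SGE thresholds only
	 * matter when TPA keeps the SGE ring populated. */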
	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link.
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables =
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (bp->vn_weight_sum)
			bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		else
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
			   " fairness will be disabled\n");
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode, minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

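/* Load codes arrive from the MCP in decreasing scope: COMMON covers PORT
 * and FUNCTION init as well, hence the deliberate fall-through in the
 * switch below.
 */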
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

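	/* SPIO5 carries the fan failure signal on boards wired for it (see
	 * bnx2x_setup_fan_failure_detection); if the attention bit is
	 * already asserted at init time, handle it now so a failed fan is
	 * caught early rather than only on the next attention interrupt. */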
	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

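	/* Skip the 10-byte fixed gzip header; if the FNAME flag (bit 3 of
	 * the FLG byte) is set, also skip the NUL-terminated original file
	 * name before handing zlib the raw deflate stream (hence
	 * inflateInit2() with -MAX_WBITS below). */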
	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

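	/* FPGA and emulation platforms run far slower than real silicon,
	   so stretch every polling budget below by a platform factor */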
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: should the NIG statistics be reset here? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bits 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
			MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	/* PXPCS COMMON comes here */
	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	/* EMAC1 COMMON comes here */
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	/* DBU COMMON comes here */
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	/* DBG COMMON comes here */
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	/* Port PXP2 comes here */
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
#endif
	/* Port DQ comes here */
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
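	/* Worked example (mtu assumed): with a 9000-byte MTU on a two-port
	 * board the branch above gives low = 96 + 9000/64 + 1 = 237 BRB
	 * blocks of 256 bytes (i.e. (24KB + 4*mtu)/256) and
	 * high = 237 + 56 = 293, 14KB above the pause-on threshold. */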
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	/* Port PRS comes here */
	bnx2x_init_block(bp, PRS_BLOCK, init_stage);
	/* Port TSDM comes here */
	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	/* Port CSDM comes here */
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	/* Port USDM comes here */
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	/* Port XSDM comes here */
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	/* Port UPB comes here */
	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	/* Port XPB comes here */
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

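	/* PBF credits below are in 16-byte units; 9040 covers a 9000-byte
	 * MTU plus header margin, and the extra "+ 553 - 22" in the init
	 * credit is kept from the HW programming notes rather than derived
	 * here. */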
	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	/* Port CFC comes here */
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	/* Port EMAC0 comes here */
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	/* Port EMAC1 comes here */
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	/* Port DBU comes here */
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	/* Port DBG comes here */
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* support pause requests from USDM, TSDM and BRB */
		REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	/* Port MCP comes here */
	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	/* Port DMAE comes here */
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and a valid bit (1) is
   added as the 53rd bit;
   then since this is a wide register(TM)
   we split it into two 32-bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
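/* Example (address assumed): for DMA address 0x1_2345_6000,
 * ONCHIP_ADDR1 = 0x00123456 (bits 43:12) and ONCHIP_ADDR2 = 0x00100000
 * (valid bit 20 of the upper word set; bits 63:44 of this address are 0).
 */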

#define CNIC_ILT_LINES		0

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_blocks[i], FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	} else
		bp->func_stx = 0;

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

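	/* Poll the FW mailbox until the sequence number we wrote is echoed
	   back; the total budget is cnt * delay, i.e. about 2s on real
	   silicon (200 * 10ms) and about 20s on slow emulation platforms */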
	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

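/* BNX2X_PCI_ALLOC hands out DMA-coherent memory for rings the chip
 * itself reads and writes; BNX2X_ALLOC uses vmalloc for the sw_rx_bd /
 * sw_tx_bd shadow arrays that only the host ever touches. */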
	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

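	/* entry 0 is the slowpath (default status block) vector; fastpath
	   queues take IGU vectors starting at this function's lead ID */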
	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	if (is_multi(bp))
		printk(KERN_INFO PFX
		       "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset].vector,
		       bp->msix_table[offset + i - 1].vector);
	else
		printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

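	/* Illustration (address assumed): for MAC 00:11:22:33:44:55 on a
	 * little-endian host the swab16() packing below yields msb 0x0011,
	 * middle 0x2233 and lsb 0x4455 - the CAM expects the address as
	 * three big-endian 16-bit words. */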
6664 /* primary MAC */
6665 config->config_table[0].cam_entry.msb_mac_addr =
6666 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6667 config->config_table[0].cam_entry.middle_mac_addr =
6668 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6669 config->config_table[0].cam_entry.lsb_mac_addr =
6670 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6671 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6672 if (set)
6673 config->config_table[0].target_table_entry.flags = 0;
6674 else
6675 CAM_INVALIDATE(config->config_table[0]);
a2fbb9ea
ET
6676 config->config_table[0].target_table_entry.client_id = 0;
6677 config->config_table[0].target_table_entry.vlan_id = 0;
6678
3101c2bc
YG
6679 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6680 (set ? "setting" : "clearing"),
a2fbb9ea
ET
6681 config->config_table[0].cam_entry.msb_mac_addr,
6682 config->config_table[0].cam_entry.middle_mac_addr,
6683 config->config_table[0].cam_entry.lsb_mac_addr);
6684
6685 /* broadcast */
4781bfad
EG
6686 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6687 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6688 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
34f80b04 6689 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6690 if (set)
6691 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6692 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
3101c2bc
YG
6693 else
6694 CAM_INVALIDATE(config->config_table[1]);
a2fbb9ea
ET
6695 config->config_table[1].target_table_entry.client_id = 0;
6696 config->config_table[1].target_table_entry.vlan_id = 0;
6697
6698 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6699 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6700 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6701}
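
/*
 * Editor's sketch (not part of the original driver): how the three
 * 16-bit CAM fields above are derived from a MAC address.  Each byte
 * pair is read as a host-order u16 and byte-swapped, so on a
 * little-endian host a hypothetical address 00:10:18:AA:BB:CC packs
 * as:
 *
 *	u8 mac[6] = { 0x00, 0x10, 0x18, 0xAA, 0xBB, 0xCC };
 *	u16 msb = swab16(*(u16 *)&mac[0]);	// 0x0010
 *	u16 mid = swab16(*(u16 *)&mac[2]);	// 0x18AA
 *	u16 lsb = swab16(*(u16 *)&mac[4]);	// 0xBBCC
 *
 * which is exactly the layout the "setting MAC (%04x:%04x:%04x)"
 * debug print above displays.
 */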

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
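
/*
 * Added usage note (annotation, not original driver text): the ramrod
 * completion protocol is post-then-wait on a state word that
 * bnx2x_sp_event() updates from the completion path.  A typical
 * caller (taken from bnx2x_setup_multi() below) looks like:
 *
 *	fp->state = BNX2X_FP_STATE_OPENING;
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
 *		      fp->cl_id, 0);
 *	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
 *			       &(fp->state), 0);
 *
 * poll = 1 is used on the unload path, where interrupts may already
 * be released and completions must be reaped by hand via
 * bnx2x_rx_int().
 */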

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   "  number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X  set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}
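
/*
 * Added summary of the fallback implemented above (annotation, not
 * original driver text):
 *
 *	int_mode forced to INTx/MSI -> single queue, MSI-X never tried
 *	otherwise                   -> try MSI-X with min(online CPUs,
 *	                               BNX2X_MAX_QUEUES) queues
 *	MSI-X allocation fails      -> drop back to a single queue; the
 *	                               caller (bnx2x_nic_load) then tries
 *	                               MSI and finally legacy INTx via
 *	                               bnx2x_req_irq()
 */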

static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP.
	 * The response gives the type of LOAD command: if this is the
	 * first port to be initialized, the common blocks should be
	 * initialized as well; otherwise not.
	 */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}
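	/*
	 * Added worked example of the NO-MCP accounting above:
	 * load_count[0] counts loads on the whole chip, load_count[1 +
	 * port] per port.  The first function up on port 0 sees
	 * {1, 1, 0} and gets LOAD_COMMON; the first on port 1 then sees
	 * {2, 1, 1} and gets LOAD_PORT; any later function on an
	 * already-initialized port gets LOAD_FUNCTION.
	 */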

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should only be re-enabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

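	/*
	 * Added note on the WoL branch above: EMAC MAC-match entries are
	 * 8 bytes apart and entry 0 is reserved for the PMF, so virtual
	 * network (VN) N programs entry N + 1; e.g. a hypothetical VN 2
	 * (BP_E1HVN == 2) writes its match address at byte offset
	 * (2 + 1) * 8 = 24.
	 */
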
	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
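
/*
 * Added example of the speed_cap_mask filtering above: if NVRAM sets
 * only PORT_HW_CFG_SPEED_CAPABILITY_D0_10G, every SUPPORTED_* speed
 * flag except SUPPORTED_10000baseT_Full is cleared from
 * bp->port.supported, so ethtool reports a 10G-only port regardless
 * of what the ext_phy_type switch above accumulated.
 */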

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
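
/*
 * Added worked example of the mac_upper/mac_lower unpacking above: a
 * hypothetical mac_upper = 0x0010 and mac_lower = 0x18aabbcc yield
 * dev_addr 00:10:18:aa:bb:cc -- the upper halfword supplies bytes 0-1
 * and the lower word bytes 2-5, most significant byte first.
 */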

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8473 return -EINVAL;
f1410647 8474 }
a2fbb9ea 8475
34f80b04 8476 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8477 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8478 return -EINVAL;
f1410647 8479 }
a2fbb9ea
ET
8480
8481 advertising = (ADVERTISED_10000baseT_Full |
8482 ADVERTISED_FIBRE);
8483 break;
8484
8485 default:
f1410647 8486 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8487 return -EINVAL;
8488 }
8489
c18487ee
YR
8490 bp->link_params.req_line_speed = cmd->speed;
8491 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8492 bp->port.advertising = advertising;
a2fbb9ea
ET
8493 }
8494
c18487ee 8495 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8496 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8497 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8498 bp->port.advertising);
a2fbb9ea 8499
34f80b04 8500 if (netif_running(dev)) {
bb2a0f7a 8501 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8502 bnx2x_link_set(bp);
8503 }
a2fbb9ea
ET
8504
8505 return 0;
8506}
8507
c18487ee
YR
8508#define PHY_FW_VER_LEN 10
8509
a2fbb9ea
ET
8510static void bnx2x_get_drvinfo(struct net_device *dev,
8511 struct ethtool_drvinfo *info)
8512{
8513 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8514 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8515
8516 strcpy(info->driver, DRV_MODULE_NAME);
8517 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8518
8519 phy_fw_ver[0] = '\0';
34f80b04 8520 if (bp->port.pmf) {
4a37fb66 8521 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8522 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8523 (bp->state != BNX2X_STATE_CLOSED),
8524 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8525 bnx2x_release_phy_lock(bp);
34f80b04 8526 }
c18487ee 8527
f0e53a84
EG
8528 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8529 (bp->common.bc_ver & 0xff0000) >> 16,
8530 (bp->common.bc_ver & 0xff00) >> 8,
8531 (bp->common.bc_ver & 0xff),
8532 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8533 strcpy(info->bus_info, pci_name(bp->pdev));
8534 info->n_stats = BNX2X_NUM_STATS;
8535 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8536 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8537 info->regdump_len = 0;
8538}
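
/* A standalone userspace sketch (not part of this file) of how a packed
 * bootcode version word is turned into the dotted "BC:x.y.z" form that
 * bnx2x_get_drvinfo() builds above. The sample value is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

static void format_bc_ver(uint32_t bc_ver, char *buf, size_t len)
{
	/* one version component per byte: 0x050b02 -> "5.11.2" */
	snprintf(buf, len, "BC:%u.%u.%u",
		 (bc_ver & 0xff0000) >> 16,
		 (bc_ver & 0xff00) >> 8,
		 (bc_ver & 0xff));
}

int main(void)
{
	char buf[16];

	format_bc_ver(0x050b02, buf, sizeof(buf));
	printf("%s\n", buf);	/* prints "BC:5.11.2" */
	return 0;
}
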
8539
0a64ea57
EG
8540#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8541#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8542
8543static int bnx2x_get_regs_len(struct net_device *dev)
8544{
8545 static u32 regdump_len;
8546 struct bnx2x *bp = netdev_priv(dev);
8547 int i;
8548
8549 if (regdump_len)
8550 return regdump_len;
8551
8552 if (CHIP_IS_E1(bp)) {
8553 for (i = 0; i < REGS_COUNT; i++)
8554 if (IS_E1_ONLINE(reg_addrs[i].info))
8555 regdump_len += reg_addrs[i].size;
8556
8557 for (i = 0; i < WREGS_COUNT_E1; i++)
8558 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8559 regdump_len += wreg_addrs_e1[i].size *
8560 (1 + wreg_addrs_e1[i].read_regs_count);
8561
8562 } else { /* E1H */
8563 for (i = 0; i < REGS_COUNT; i++)
8564 if (IS_E1H_ONLINE(reg_addrs[i].info))
8565 regdump_len += reg_addrs[i].size;
8566
8567 for (i = 0; i < WREGS_COUNT_E1H; i++)
8568 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8569 regdump_len += wreg_addrs_e1h[i].size *
8570 (1 + wreg_addrs_e1h[i].read_regs_count);
8571 }
8572 regdump_len *= 4;
8573 regdump_len += sizeof(struct dump_hdr);
8574
8575 return regdump_len;
8576}
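
/* A standalone sketch (not driver code) of the length computation in
 * bnx2x_get_regs_len() above: walk a register table, count only the
 * blocks marked present for this chip, then convert dwords to bytes and
 * add the dump header. RI_ONLINE and the table contents are
 * illustrative stand-ins for the RI_E1_ONLINE/RI_E1H_ONLINE bits.
 */
#include <stdio.h>
#include <stdint.h>

#define RI_ONLINE 0x1

struct reg_block {
	uint32_t info;	/* presence bits */
	uint32_t size;	/* block size in dwords */
};

static uint32_t dump_len_bytes(const struct reg_block *tbl, int n,
			       uint32_t hdr_bytes)
{
	uint32_t dwords = 0;
	int i;

	for (i = 0; i < n; i++)
		if (tbl[i].info & RI_ONLINE)	/* skip blocks absent on this chip */
			dwords += tbl[i].size;

	return dwords * 4 + hdr_bytes;	/* each register is 32 bits wide */
}

int main(void)
{
	static const struct reg_block tbl[] = {
		{ RI_ONLINE, 16 }, { 0, 8 }, { RI_ONLINE, 4 },
	};

	/* (16 + 4) dwords * 4 + 24-byte header = 104 */
	printf("%u\n", dump_len_bytes(tbl, 3, 24));
	return 0;
}
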
8577
8578static void bnx2x_get_regs(struct net_device *dev,
8579 struct ethtool_regs *regs, void *_p)
8580{
8581 u32 *p = _p, i, j;
8582 struct bnx2x *bp = netdev_priv(dev);
8583 struct dump_hdr dump_hdr = {0};
8584
8585 regs->version = 0;
8586 memset(p, 0, regs->len);
8587
8588 if (!netif_running(bp->dev))
8589 return;
8590
8591 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8592 dump_hdr.dump_sign = dump_sign_all;
8593 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8594 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8595 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8596 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8597 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8598
8599 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8600 p += dump_hdr.hdr_size + 1;
8601
8602 if (CHIP_IS_E1(bp)) {
8603 for (i = 0; i < REGS_COUNT; i++)
8604 if (IS_E1_ONLINE(reg_addrs[i].info))
8605 for (j = 0; j < reg_addrs[i].size; j++)
8606 *p++ = REG_RD(bp,
8607 reg_addrs[i].addr + j*4);
8608
8609 } else { /* E1H */
8610 for (i = 0; i < REGS_COUNT; i++)
8611 if (IS_E1H_ONLINE(reg_addrs[i].info))
8612 for (j = 0; j < reg_addrs[i].size; j++)
8613 *p++ = REG_RD(bp,
8614 reg_addrs[i].addr + j*4);
8615 }
8616}
8617
a2fbb9ea
ET
8618static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8619{
8620 struct bnx2x *bp = netdev_priv(dev);
8621
8622 if (bp->flags & NO_WOL_FLAG) {
8623 wol->supported = 0;
8624 wol->wolopts = 0;
8625 } else {
8626 wol->supported = WAKE_MAGIC;
8627 if (bp->wol)
8628 wol->wolopts = WAKE_MAGIC;
8629 else
8630 wol->wolopts = 0;
8631 }
8632 memset(&wol->sopass, 0, sizeof(wol->sopass));
8633}
8634
8635static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8636{
8637 struct bnx2x *bp = netdev_priv(dev);
8638
8639 if (wol->wolopts & ~WAKE_MAGIC)
8640 return -EINVAL;
8641
8642 if (wol->wolopts & WAKE_MAGIC) {
8643 if (bp->flags & NO_WOL_FLAG)
8644 return -EINVAL;
8645
8646 bp->wol = 1;
34f80b04 8647 } else
a2fbb9ea 8648 bp->wol = 0;
34f80b04 8649
a2fbb9ea
ET
8650 return 0;
8651}
8652
8653static u32 bnx2x_get_msglevel(struct net_device *dev)
8654{
8655 struct bnx2x *bp = netdev_priv(dev);
8656
8657 return bp->msglevel;
8658}
8659
8660static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8661{
8662 struct bnx2x *bp = netdev_priv(dev);
8663
8664 if (capable(CAP_NET_ADMIN))
8665 bp->msglevel = level;
8666}
8667
8668static int bnx2x_nway_reset(struct net_device *dev)
8669{
8670 struct bnx2x *bp = netdev_priv(dev);
8671
34f80b04
EG
8672 if (!bp->port.pmf)
8673 return 0;
a2fbb9ea 8674
34f80b04 8675 if (netif_running(dev)) {
bb2a0f7a 8676 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8677 bnx2x_link_set(bp);
8678 }
a2fbb9ea
ET
8679
8680 return 0;
8681}
8682
01e53298
NO
8683static u32
8684bnx2x_get_link(struct net_device *dev)
8685{
8686 struct bnx2x *bp = netdev_priv(dev);
8687
8688 return bp->link_vars.link_up;
8689}
8690
a2fbb9ea
ET
8691static int bnx2x_get_eeprom_len(struct net_device *dev)
8692{
8693 struct bnx2x *bp = netdev_priv(dev);
8694
34f80b04 8695 return bp->common.flash_size;
a2fbb9ea
ET
8696}
8697
8698static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8699{
34f80b04 8700 int port = BP_PORT(bp);
a2fbb9ea
ET
8701 int count, i;
8702 u32 val = 0;
8703
8704 /* adjust timeout for emulation/FPGA */
8705 count = NVRAM_TIMEOUT_COUNT;
8706 if (CHIP_REV_IS_SLOW(bp))
8707 count *= 100;
8708
8709 /* request access to nvram interface */
8710 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8711 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8712
8713 for (i = 0; i < count*10; i++) {
8714 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8715 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8716 break;
8717
8718 udelay(5);
8719 }
8720
8721 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8722 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
8723 return -EBUSY;
8724 }
8725
8726 return 0;
8727}
8728
8729static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8730{
34f80b04 8731 int port = BP_PORT(bp);
a2fbb9ea
ET
8732 int count, i;
8733 u32 val = 0;
8734
8735 /* adjust timeout for emulation/FPGA */
8736 count = NVRAM_TIMEOUT_COUNT;
8737 if (CHIP_REV_IS_SLOW(bp))
8738 count *= 100;
8739
8740 /* relinquish nvram interface */
8741 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8742 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8743
8744 for (i = 0; i < count*10; i++) {
8745 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8746 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8747 break;
8748
8749 udelay(5);
8750 }
8751
8752 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8753 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
8754 return -EBUSY;
8755 }
8756
8757 return 0;
8758}
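
/* A standalone sketch (not driver code) of the bounded polling used by
 * the two lock routines above: request the arbitration bit, then poll
 * with a finite budget so a dead arbiter yields -EBUSY instead of a
 * hang. The toy reg_read() simulates hardware that grants the lock
 * after a few polls.
 */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

static int polls;	/* toy hardware: the bit comes up on the 4th read */
static uint32_t reg_read(void)
{
	return (++polls > 3) ? 0x1 : 0x0;
}

static int poll_bit_set(uint32_t bit, int budget)
{
	while (budget--) {
		if (reg_read() & bit)
			return 0;	/* lock granted */
		/* a real driver would udelay(5) here */
	}
	return -EBUSY;			/* arbiter never granted the lock */
}

int main(void)
{
	int rc = poll_bit_set(0x1, 100);

	printf("rc=%d after %d polls\n", rc, polls);
	return 0;
}
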
8759
8760static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8761{
8762 u32 val;
8763
8764 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8765
8766 /* enable both bits, even on read */
8767 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8768 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8769 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8770}
8771
8772static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8773{
8774 u32 val;
8775
8776 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8777
8778 /* disable both bits, even after read */
8779 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8780 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8781 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8782}
8783
4781bfad 8784static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
8785 u32 cmd_flags)
8786{
f1410647 8787 int count, i, rc;
a2fbb9ea
ET
8788 u32 val;
8789
8790 /* build the command word */
8791 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8792
8793 /* need to clear DONE bit separately */
8794 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8795
8796 /* address of the NVRAM to read from */
8797 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8798 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8799
8800 /* issue a read command */
8801 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8802
8803 /* adjust timeout for emulation/FPGA */
8804 count = NVRAM_TIMEOUT_COUNT;
8805 if (CHIP_REV_IS_SLOW(bp))
8806 count *= 100;
8807
8808 /* wait for completion */
8809 *ret_val = 0;
8810 rc = -EBUSY;
8811 for (i = 0; i < count; i++) {
8812 udelay(5);
8813 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8814
8815 if (val & MCPR_NVM_COMMAND_DONE) {
8816 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
8817 /* we read nvram data in cpu order,
8818 * but ethtool sees it as an array of bytes;
8819 * converting to big-endian does the work */
4781bfad 8820 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
8821 rc = 0;
8822 break;
8823 }
8824 }
8825
8826 return rc;
8827}
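
/* A standalone sketch (not driver code) of the cpu_to_be32() step
 * above: the controller returns each dword in CPU order, but ethtool
 * hands userspace a byte array, so the value is stored big-endian to
 * keep the bytes in NVRAM order on any host.
 */
#include <stdio.h>
#include <stdint.h>

static void store_be32(uint8_t *dst, uint32_t v)
{
	dst[0] = (uint8_t)(v >> 24);	/* most significant byte first */
	dst[1] = (uint8_t)(v >> 16);
	dst[2] = (uint8_t)(v >> 8);
	dst[3] = (uint8_t)v;
}

int main(void)
{
	uint8_t buf[4];

	store_be32(buf, 0x669955aa);	/* the NVRAM magic, as an example */
	/* prints "66 99 55 aa" regardless of host endianness */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
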
8828
8829static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8830 int buf_size)
8831{
8832 int rc;
8833 u32 cmd_flags;
4781bfad 8834 __be32 val;
a2fbb9ea
ET
8835
8836 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8837 DP(BNX2X_MSG_NVM,
c14423fe 8838 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8839 offset, buf_size);
8840 return -EINVAL;
8841 }
8842
34f80b04
EG
8843 if (offset + buf_size > bp->common.flash_size) {
8844 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8845 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8846 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8847 return -EINVAL;
8848 }
8849
8850 /* request access to nvram interface */
8851 rc = bnx2x_acquire_nvram_lock(bp);
8852 if (rc)
8853 return rc;
8854
8855 /* enable access to nvram interface */
8856 bnx2x_enable_nvram_access(bp);
8857
8858 /* read the first word(s) */
8859 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8860 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8861 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8862 memcpy(ret_buf, &val, 4);
8863
8864 /* advance to the next dword */
8865 offset += sizeof(u32);
8866 ret_buf += sizeof(u32);
8867 buf_size -= sizeof(u32);
8868 cmd_flags = 0;
8869 }
8870
8871 if (rc == 0) {
8872 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8873 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8874 memcpy(ret_buf, &val, 4);
8875 }
8876
8877 /* disable access to nvram interface */
8878 bnx2x_disable_nvram_access(bp);
8879 bnx2x_release_nvram_lock(bp);
8880
8881 return rc;
8882}
8883
8884static int bnx2x_get_eeprom(struct net_device *dev,
8885 struct ethtool_eeprom *eeprom, u8 *eebuf)
8886{
8887 struct bnx2x *bp = netdev_priv(dev);
8888 int rc;
8889
2add3acb
EG
8890 if (!netif_running(dev))
8891 return -EAGAIN;
8892
34f80b04 8893 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8894 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8895 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8896 eeprom->len, eeprom->len);
8897
8898 /* parameters already validated in ethtool_get_eeprom */
8899
8900 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8901
8902 return rc;
8903}
8904
8905static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8906 u32 cmd_flags)
8907{
f1410647 8908 int count, i, rc;
a2fbb9ea
ET
8909
8910 /* build the command word */
8911 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8912
8913 /* need to clear DONE bit separately */
8914 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8915
8916 /* write the data */
8917 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8918
8919 /* address of the NVRAM to write to */
8920 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8921 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8922
8923 /* issue the write command */
8924 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8925
8926 /* adjust timeout for emulation/FPGA */
8927 count = NVRAM_TIMEOUT_COUNT;
8928 if (CHIP_REV_IS_SLOW(bp))
8929 count *= 100;
8930
8931 /* wait for completion */
8932 rc = -EBUSY;
8933 for (i = 0; i < count; i++) {
8934 udelay(5);
8935 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8936 if (val & MCPR_NVM_COMMAND_DONE) {
8937 rc = 0;
8938 break;
8939 }
8940 }
8941
8942 return rc;
8943}
8944
f1410647 8945#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8946
8947static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8948 int buf_size)
8949{
8950 int rc;
8951 u32 cmd_flags;
8952 u32 align_offset;
4781bfad 8953 __be32 val;
a2fbb9ea 8954
34f80b04
EG
8955 if (offset + buf_size > bp->common.flash_size) {
8956 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8957 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8958 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8959 return -EINVAL;
8960 }
8961
8962 /* request access to nvram interface */
8963 rc = bnx2x_acquire_nvram_lock(bp);
8964 if (rc)
8965 return rc;
8966
8967 /* enable access to nvram interface */
8968 bnx2x_enable_nvram_access(bp);
8969
8970 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8971 align_offset = (offset & ~0x03);
8972 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8973
8974 if (rc == 0) {
8975 val &= ~(0xff << BYTE_OFFSET(offset));
8976 val |= (*data_buf << BYTE_OFFSET(offset));
8977
8978 /* nvram data is returned as an array of bytes
8979 * convert it back to cpu order */
8980 val = be32_to_cpu(val);
8981
a2fbb9ea
ET
8982 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8983 cmd_flags);
8984 }
8985
8986 /* disable access to nvram interface */
8987 bnx2x_disable_nvram_access(bp);
8988 bnx2x_release_nvram_lock(bp);
8989
8990 return rc;
8991}
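
/* A standalone sketch (not driver code) of the single-byte path above:
 * NVRAM is written a dword at a time, so a one-byte write reads the
 * aligned dword, patches one byte lane in place (BYTE_OFFSET picks the
 * lane), and writes the dword back.
 */
#include <stdio.h>
#include <stdint.h>

#define BYTE_OFFSET(offset) (8 * ((offset) & 0x03))

static uint32_t patch_byte(uint32_t dword, uint32_t offset, uint8_t data)
{
	dword &= ~(0xffu << BYTE_OFFSET(offset));	/* clear the lane */
	dword |= (uint32_t)data << BYTE_OFFSET(offset);	/* insert new byte */
	return dword;
}

int main(void)
{
	/* patch the byte at offset 6, i.e. lane 2 of the dword at offset 4 */
	printf("0x%08x\n", patch_byte(0x11223344, 6, 0xab));
	return 0;	/* prints "0x11ab3344" */
}
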
8992
8993static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8994 int buf_size)
8995{
8996 int rc;
8997 u32 cmd_flags;
8998 u32 val;
8999 u32 written_so_far;
9000
34f80b04 9001 if (buf_size == 1) /* ethtool */
a2fbb9ea 9002 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
9003
9004 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9005 DP(BNX2X_MSG_NVM,
c14423fe 9006 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9007 offset, buf_size);
9008 return -EINVAL;
9009 }
9010
34f80b04
EG
9011 if (offset + buf_size > bp->common.flash_size) {
9012 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9013 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9014 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9015 return -EINVAL;
9016 }
9017
9018 /* request access to nvram interface */
9019 rc = bnx2x_acquire_nvram_lock(bp);
9020 if (rc)
9021 return rc;
9022
9023 /* enable access to nvram interface */
9024 bnx2x_enable_nvram_access(bp);
9025
9026 written_so_far = 0;
9027 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9028 while ((written_so_far < buf_size) && (rc == 0)) {
9029 if (written_so_far == (buf_size - sizeof(u32)))
9030 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9031 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9032 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9033 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9034 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9035
9036 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
9037
9038 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9039
9040 /* advance to the next dword */
9041 offset += sizeof(u32);
9042 data_buf += sizeof(u32);
9043 written_so_far += sizeof(u32);
9044 cmd_flags = 0;
9045 }
9046
9047 /* disable access to nvram interface */
9048 bnx2x_disable_nvram_access(bp);
9049 bnx2x_release_nvram_lock(bp);
9050
9051 return rc;
9052}
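
/* A standalone sketch (not driver code) of the FIRST/LAST sequencing in
 * bnx2x_nvram_write() above: each dword write carries flags telling the
 * controller whether it opens or closes a burst, and a burst may not
 * cross an NVRAM page boundary. The flag and page-size values are
 * illustrative stand-ins.
 */
#include <stdio.h>
#include <stdint.h>

#define CMD_FIRST	0x1
#define CMD_LAST	0x2
#define NVRAM_PAGE	256	/* bytes per NVRAM page, for example */

static uint32_t dword_flags(uint32_t offset, uint32_t written, uint32_t total)
{
	uint32_t flags = 0;

	if (written == 0 || (offset % NVRAM_PAGE) == 0)
		flags |= CMD_FIRST;	/* first dword of the buffer or page */
	if (written == total - 4 || ((offset + 4) % NVRAM_PAGE) == 0)
		flags |= CMD_LAST;	/* last dword of the buffer or page */
	return flags;
}

int main(void)
{
	uint32_t off, done, total = 16;

	/* a 16-byte write that straddles a page boundary at offset 256 */
	for (off = 252, done = 0; done < total; off += 4, done += 4)
		printf("offset %u flags 0x%x\n", off,
		       dword_flags(off, done, total));
	return 0;
}
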
9053
9054static int bnx2x_set_eeprom(struct net_device *dev,
9055 struct ethtool_eeprom *eeprom, u8 *eebuf)
9056{
9057 struct bnx2x *bp = netdev_priv(dev);
9058 int rc;
9059
9f4c9583
EG
9060 if (!netif_running(dev))
9061 return -EAGAIN;
9062
34f80b04 9063 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9064 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9065 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9066 eeprom->len, eeprom->len);
9067
9068 /* parameters already validated in ethtool_set_eeprom */
9069
c18487ee 9070 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
9071 if (eeprom->magic == 0x00504859)
9072 if (bp->port.pmf) {
9073
4a37fb66 9074 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
9075 rc = bnx2x_flash_download(bp, BP_PORT(bp),
9076 bp->link_params.ext_phy_config,
9077 (bp->state != BNX2X_STATE_CLOSED),
9078 eebuf, eeprom->len);
bb2a0f7a
YG
9079 if ((bp->state == BNX2X_STATE_OPEN) ||
9080 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04 9081 rc |= bnx2x_link_reset(&bp->link_params,
589abe3a 9082 &bp->link_vars, 1);
34f80b04
EG
9083 rc |= bnx2x_phy_init(&bp->link_params,
9084 &bp->link_vars);
bb2a0f7a 9085 }
4a37fb66 9086 bnx2x_release_phy_lock(bp);
34f80b04
EG
9087
9088 } else /* Only the PMF can access the PHY */
9089 return -EINVAL;
9090 else
c18487ee 9091 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
9092
9093 return rc;
9094}
9095
9096static int bnx2x_get_coalesce(struct net_device *dev,
9097 struct ethtool_coalesce *coal)
9098{
9099 struct bnx2x *bp = netdev_priv(dev);
9100
9101 memset(coal, 0, sizeof(struct ethtool_coalesce));
9102
9103 coal->rx_coalesce_usecs = bp->rx_ticks;
9104 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
9105
9106 return 0;
9107}
9108
9109static int bnx2x_set_coalesce(struct net_device *dev,
9110 struct ethtool_coalesce *coal)
9111{
9112 struct bnx2x *bp = netdev_priv(dev);
9113
9114 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
1e9d9987
EG
9115 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9116 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
a2fbb9ea
ET
9117
9118 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
1e9d9987
EG
9119 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9120 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
a2fbb9ea 9121
34f80b04 9122 if (netif_running(dev))
a2fbb9ea
ET
9123 bnx2x_update_coalesce(bp);
9124
9125 return 0;
9126}
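
/* A standalone sketch (not driver code) of the clamping above:
 * user-supplied coalescing values are truncated to 16-bit tick counts
 * and capped at a maximum timeout. The cap below is an illustrative
 * stand-in for BNX2X_MAX_COALESCE_TOUT.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_COAL_TOUT 3000

static uint16_t clamp_ticks(uint32_t usecs)
{
	uint16_t ticks = (uint16_t)usecs;	/* truncate, as the cast above does */

	if (ticks > MAX_COAL_TOUT)
		ticks = MAX_COAL_TOUT;
	return ticks;
}

int main(void)
{
	/* 70000 wraps to 4464 as a u16, then hits the 3000 cap */
	printf("%u %u\n", clamp_ticks(25), clamp_ticks(70000));
	return 0;	/* prints "25 3000" */
}
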
9127
9128static void bnx2x_get_ringparam(struct net_device *dev,
9129 struct ethtool_ringparam *ering)
9130{
9131 struct bnx2x *bp = netdev_priv(dev);
9132
9133 ering->rx_max_pending = MAX_RX_AVAIL;
9134 ering->rx_mini_max_pending = 0;
9135 ering->rx_jumbo_max_pending = 0;
9136
9137 ering->rx_pending = bp->rx_ring_size;
9138 ering->rx_mini_pending = 0;
9139 ering->rx_jumbo_pending = 0;
9140
9141 ering->tx_max_pending = MAX_TX_AVAIL;
9142 ering->tx_pending = bp->tx_ring_size;
9143}
9144
9145static int bnx2x_set_ringparam(struct net_device *dev,
9146 struct ethtool_ringparam *ering)
9147{
9148 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9149 int rc = 0;
a2fbb9ea
ET
9150
9151 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9152 (ering->tx_pending > MAX_TX_AVAIL) ||
9153 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9154 return -EINVAL;
9155
9156 bp->rx_ring_size = ering->rx_pending;
9157 bp->tx_ring_size = ering->tx_pending;
9158
34f80b04
EG
9159 if (netif_running(dev)) {
9160 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9161 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
9162 }
9163
34f80b04 9164 return rc;
a2fbb9ea
ET
9165}
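
/* A standalone sketch (not driver code) of the validation above: the
 * TX ring must hold at least one maximally fragmented packet plus a few
 * BDs of overhead, hence the MAX_SKB_FRAGS + 4 lower bound. The
 * constants below are illustrative stand-ins.
 */
#include <stdio.h>

#define MAX_FRAGS	18	/* stand-in for MAX_SKB_FRAGS */
#define MAX_AVAIL	4078	/* stand-in for MAX_TX_AVAIL */

static int tx_ring_size_ok(unsigned int pending)
{
	return pending <= MAX_AVAIL && pending > MAX_FRAGS + 4;
}

int main(void)
{
	/* too small, fine, too large */
	printf("%d %d %d\n", tx_ring_size_ok(16), tx_ring_size_ok(256),
	       tx_ring_size_ok(100000));
	return 0;	/* prints "0 1 0" */
}
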
9166
9167static void bnx2x_get_pauseparam(struct net_device *dev,
9168 struct ethtool_pauseparam *epause)
9169{
9170 struct bnx2x *bp = netdev_priv(dev);
9171
356e2385
EG
9172 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9173 BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
9174 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9175
c0700f90
DM
9176 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9177 BNX2X_FLOW_CTRL_RX);
9178 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9179 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
9180
9181 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9182 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9183 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9184}
9185
9186static int bnx2x_set_pauseparam(struct net_device *dev,
9187 struct ethtool_pauseparam *epause)
9188{
9189 struct bnx2x *bp = netdev_priv(dev);
9190
34f80b04
EG
9191 if (IS_E1HMF(bp))
9192 return 0;
9193
a2fbb9ea
ET
9194 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9195 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9196 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9197
c0700f90 9198 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9199
f1410647 9200 if (epause->rx_pause)
c0700f90 9201 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9202
f1410647 9203 if (epause->tx_pause)
c0700f90 9204 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9205
c0700f90
DM
9206 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9207 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9208
c18487ee 9209 if (epause->autoneg) {
34f80b04 9210 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9211 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
9212 return -EINVAL;
9213 }
a2fbb9ea 9214
c18487ee 9215 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9216 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9217 }
a2fbb9ea 9218
c18487ee
YR
9219 DP(NETIF_MSG_LINK,
9220 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
9221
9222 if (netif_running(dev)) {
bb2a0f7a 9223 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9224 bnx2x_link_set(bp);
9225 }
a2fbb9ea
ET
9226
9227 return 0;
9228}
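
/* A standalone sketch (not driver code) of the flag mapping above: the
 * rx/tx pause booleans collapse into one flow-control request word, and
 * "both directions off" is normalized to an explicit NONE value so it
 * cannot be mistaken for AUTO. The flag values are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define FC_AUTO	0x0
#define FC_RX	0x1
#define FC_TX	0x2
#define FC_NONE	0x4

static uint32_t req_flow_ctrl(int rx_pause, int tx_pause)
{
	uint32_t fc = FC_AUTO;

	if (rx_pause)
		fc |= FC_RX;
	if (tx_pause)
		fc |= FC_TX;
	if (fc == FC_AUTO)	/* neither direction: explicitly none */
		fc = FC_NONE;
	return fc;
}

int main(void)
{
	printf("0x%x 0x%x 0x%x\n", req_flow_ctrl(1, 1), req_flow_ctrl(1, 0),
	       req_flow_ctrl(0, 0));
	return 0;	/* prints "0x3 0x1 0x4" */
}
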
9229
df0f2343
VZ
9230static int bnx2x_set_flags(struct net_device *dev, u32 data)
9231{
9232 struct bnx2x *bp = netdev_priv(dev);
9233 int changed = 0;
9234 int rc = 0;
9235
9236 /* TPA requires Rx CSUM offloading */
9237 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9238 if (!(dev->features & NETIF_F_LRO)) {
9239 dev->features |= NETIF_F_LRO;
9240 bp->flags |= TPA_ENABLE_FLAG;
9241 changed = 1;
9242 }
9243
9244 } else if (dev->features & NETIF_F_LRO) {
9245 dev->features &= ~NETIF_F_LRO;
9246 bp->flags &= ~TPA_ENABLE_FLAG;
9247 changed = 1;
9248 }
9249
9250 if (changed && netif_running(dev)) {
9251 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9252 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9253 }
9254
9255 return rc;
9256}
9257
a2fbb9ea
ET
9258static u32 bnx2x_get_rx_csum(struct net_device *dev)
9259{
9260 struct bnx2x *bp = netdev_priv(dev);
9261
9262 return bp->rx_csum;
9263}
9264
9265static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9266{
9267 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9268 int rc = 0;
a2fbb9ea
ET
9269
9270 bp->rx_csum = data;
df0f2343
VZ
9271
9272 /* Disable TPA when Rx CSUM is disabled; otherwise all
9273 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9274 if (!data) {
9275 u32 flags = ethtool_op_get_flags(dev);
9276
9277 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9278 }
9279
9280 return rc;
a2fbb9ea
ET
9281}
9282
9283static int bnx2x_set_tso(struct net_device *dev, u32 data)
9284{
755735eb 9285 if (data) {
a2fbb9ea 9286 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9287 dev->features |= NETIF_F_TSO6;
9288 } else {
a2fbb9ea 9289 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9290 dev->features &= ~NETIF_F_TSO6;
9291 }
9292
a2fbb9ea
ET
9293 return 0;
9294}
9295
f3c87cdd 9296static const struct {
a2fbb9ea
ET
9297 char string[ETH_GSTRING_LEN];
9298} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
9299 { "register_test (offline)" },
9300 { "memory_test (offline)" },
9301 { "loopback_test (offline)" },
9302 { "nvram_test (online)" },
9303 { "interrupt_test (online)" },
9304 { "link_test (online)" },
d3d4f495 9305 { "idle check (online)" }
a2fbb9ea
ET
9306};
9307
9308static int bnx2x_self_test_count(struct net_device *dev)
9309{
9310 return BNX2X_NUM_TESTS;
9311}
9312
f3c87cdd
YG
9313static int bnx2x_test_registers(struct bnx2x *bp)
9314{
9315 int idx, i, rc = -ENODEV;
9316 u32 wr_val = 0;
9dabc424 9317 int port = BP_PORT(bp);
f3c87cdd
YG
9318 static const struct {
9319 u32 offset0;
9320 u32 offset1;
9321 u32 mask;
9322 } reg_tbl[] = {
9323/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9324 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9325 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9326 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9327 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9328 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9329 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9330 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9331 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9332 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9333/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9334 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9335 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9336 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9337 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9338 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9339 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9340 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9341 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9342 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9343/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9344 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9345 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9346 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9347 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9348 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9349 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9350 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9351 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9352 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9353/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9354 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9355 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9356 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9357 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9358 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9359 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9360 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9361
9362 { 0xffffffff, 0, 0x00000000 }
9363 };
9364
9365 if (!netif_running(bp->dev))
9366 return rc;
9367
9368 /* Run the test twice:
9369 first by writing 0x00000000, then by writing 0xffffffff */
9370 for (idx = 0; idx < 2; idx++) {
9371
9372 switch (idx) {
9373 case 0:
9374 wr_val = 0;
9375 break;
9376 case 1:
9377 wr_val = 0xffffffff;
9378 break;
9379 }
9380
9381 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9382 u32 offset, mask, save_val, val;
f3c87cdd
YG
9383
9384 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9385 mask = reg_tbl[i].mask;
9386
9387 save_val = REG_RD(bp, offset);
9388
9389 REG_WR(bp, offset, wr_val);
9390 val = REG_RD(bp, offset);
9391
9392 /* Restore the original register's value */
9393 REG_WR(bp, offset, save_val);
9394
9395 /* verify that the value read back is as expected */
9396 if ((val & mask) != (wr_val & mask))
9397 goto test_reg_exit;
9398 }
9399 }
9400
9401 rc = 0;
9402
9403test_reg_exit:
9404 return rc;
9405}
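
/* A standalone sketch (not driver code) of the self-test pattern above:
 * write a test value, read it back through the register's writable-bit
 * mask, and restore the original contents whether or not the compare
 * succeeds. The toy register below has 10 writable bits.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t fake_reg;	/* stands in for one device register */

static uint32_t reg_read(void)
{
	return fake_reg;
}

static void reg_write(uint32_t v)
{
	fake_reg = v & 0x000003ff;	/* hardware keeps only writable bits */
}

static int test_one_reg(uint32_t wr_val, uint32_t mask)
{
	uint32_t save = reg_read();
	uint32_t val;

	reg_write(wr_val);
	val = reg_read();
	reg_write(save);	/* always restore the original value */

	return ((val & mask) == (wr_val & mask)) ? 0 : -1;
}

int main(void)
{
	fake_reg = 0x123;
	/* pass 0 writes all zeros, pass 1 all ones, as in the driver */
	printf("%d %d\n", test_one_reg(0x00000000, 0x3ff),
	       test_one_reg(0xffffffff, 0x3ff));
	printf("restored: 0x%x\n", fake_reg);	/* still 0x123 */
	return 0;
}
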
9406
9407static int bnx2x_test_memory(struct bnx2x *bp)
9408{
9409 int i, j, rc = -ENODEV;
9410 u32 val;
9411 static const struct {
9412 u32 offset;
9413 int size;
9414 } mem_tbl[] = {
9415 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9416 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9417 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9418 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9419 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9420 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9421 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9422
9423 { 0xffffffff, 0 }
9424 };
9425 static const struct {
9426 char *name;
9427 u32 offset;
9dabc424
YG
9428 u32 e1_mask;
9429 u32 e1h_mask;
f3c87cdd 9430 } prty_tbl[] = {
9dabc424
YG
9431 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9432 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9433 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9434 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9435 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9436 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9437
9438 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
9439 };
9440
9441 if (!netif_running(bp->dev))
9442 return rc;
9443
9444 /* Go through all the memories */
9445 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9446 for (j = 0; j < mem_tbl[i].size; j++)
9447 REG_RD(bp, mem_tbl[i].offset + j*4);
9448
9449 /* Check the parity status */
9450 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9451 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
9452 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9453 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
9454 DP(NETIF_MSG_HW,
9455 "%s is 0x%x\n", prty_tbl[i].name, val);
9456 goto test_mem_exit;
9457 }
9458 }
9459
9460 rc = 0;
9461
9462test_mem_exit:
9463 return rc;
9464}
9465
f3c87cdd
YG
9466static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9467{
9468 int cnt = 1000;
9469
9470 if (link_up)
9471 while (bnx2x_link_test(bp) && cnt--)
9472 msleep(10);
9473}
9474
9475static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9476{
9477 unsigned int pkt_size, num_pkts, i;
9478 struct sk_buff *skb;
9479 unsigned char *packet;
9480 struct bnx2x_fastpath *fp = &bp->fp[0];
9481 u16 tx_start_idx, tx_idx;
9482 u16 rx_start_idx, rx_idx;
9483 u16 pkt_prod;
9484 struct sw_tx_bd *tx_buf;
9485 struct eth_tx_bd *tx_bd;
9486 dma_addr_t mapping;
9487 union eth_rx_cqe *cqe;
9488 u8 cqe_fp_flags;
9489 struct sw_rx_bd *rx_buf;
9490 u16 len;
9491 int rc = -ENODEV;
9492
b5bf9068
EG
9493 /* check the loopback mode */
9494 switch (loopback_mode) {
9495 case BNX2X_PHY_LOOPBACK:
9496 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9497 return -EINVAL;
9498 break;
9499 case BNX2X_MAC_LOOPBACK:
f3c87cdd 9500 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9501 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
9502 break;
9503 default:
f3c87cdd 9504 return -EINVAL;
b5bf9068 9505 }
f3c87cdd 9506
b5bf9068
EG
9507 /* prepare the loopback packet */
9508 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9509 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
9510 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9511 if (!skb) {
9512 rc = -ENOMEM;
9513 goto test_loopback_exit;
9514 }
9515 packet = skb_put(skb, pkt_size);
9516 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9517 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9518 for (i = ETH_HLEN; i < pkt_size; i++)
9519 packet[i] = (unsigned char) (i & 0xff);
9520
b5bf9068 9521 /* send the loopback packet */
f3c87cdd
YG
9522 num_pkts = 0;
9523 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9524 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9525
9526 pkt_prod = fp->tx_pkt_prod++;
9527 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9528 tx_buf->first_bd = fp->tx_bd_prod;
9529 tx_buf->skb = skb;
9530
9531 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9532 mapping = pci_map_single(bp->pdev, skb->data,
9533 skb_headlen(skb), PCI_DMA_TODEVICE);
9534 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9535 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9536 tx_bd->nbd = cpu_to_le16(1);
9537 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9538 tx_bd->vlan = cpu_to_le16(pkt_prod);
9539 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9540 ETH_TX_BD_FLAGS_END_BD);
9541 tx_bd->general_data = ((UNICAST_ADDRESS <<
9542 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9543
58f4c4cf
EG
9544 wmb();
9545
4781bfad 9546 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
f3c87cdd 9547 mb(); /* FW restriction: must not reorder writing nbd and packets */
4781bfad 9548 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
0626b899 9549 DOORBELL(bp, fp->index, 0);
f3c87cdd
YG
9550
9551 mmiowb();
9552
9553 num_pkts++;
9554 fp->tx_bd_prod++;
9555 bp->dev->trans_start = jiffies;
9556
9557 udelay(100);
9558
9559 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9560 if (tx_idx != tx_start_idx + num_pkts)
9561 goto test_loopback_exit;
9562
9563 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9564 if (rx_idx != rx_start_idx + num_pkts)
9565 goto test_loopback_exit;
9566
9567 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9568 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9569 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9570 goto test_loopback_rx_exit;
9571
9572 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9573 if (len != pkt_size)
9574 goto test_loopback_rx_exit;
9575
9576 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9577 skb = rx_buf->skb;
9578 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9579 for (i = ETH_HLEN; i < pkt_size; i++)
9580 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9581 goto test_loopback_rx_exit;
9582
9583 rc = 0;
9584
9585test_loopback_rx_exit:
f3c87cdd
YG
9586
9587 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9588 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9589 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9590 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9591
9592 /* Update producers */
9593 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9594 fp->rx_sge_prod);
f3c87cdd
YG
9595
9596test_loopback_exit:
9597 bp->link_params.loopback_mode = LOOPBACK_NONE;
9598
9599 return rc;
9600}
9601
9602static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9603{
b5bf9068 9604 int rc = 0, res;
f3c87cdd
YG
9605
9606 if (!netif_running(bp->dev))
9607 return BNX2X_LOOPBACK_FAILED;
9608
f8ef6e44 9609 bnx2x_netif_stop(bp, 1);
3910c8ae 9610 bnx2x_acquire_phy_lock(bp);
f3c87cdd 9611
b5bf9068
EG
9612 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9613 if (res) {
9614 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9615 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
9616 }
9617
b5bf9068
EG
9618 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9619 if (res) {
9620 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9621 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
9622 }
9623
3910c8ae 9624 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
9625 bnx2x_netif_start(bp);
9626
9627 return rc;
9628}
9629
9630#define CRC32_RESIDUAL 0xdebb20e3
9631
9632static int bnx2x_test_nvram(struct bnx2x *bp)
9633{
9634 static const struct {
9635 int offset;
9636 int size;
9637 } nvram_tbl[] = {
9638 { 0, 0x14 }, /* bootstrap */
9639 { 0x14, 0xec }, /* dir */
9640 { 0x100, 0x350 }, /* manuf_info */
9641 { 0x450, 0xf0 }, /* feature_info */
9642 { 0x640, 0x64 }, /* upgrade_key_info */
9643 { 0x6a4, 0x64 },
9644 { 0x708, 0x70 }, /* manuf_key_info */
9645 { 0x778, 0x70 },
9646 { 0, 0 }
9647 };
4781bfad 9648 __be32 buf[0x350 / 4];
f3c87cdd
YG
9649 u8 *data = (u8 *)buf;
9650 int i, rc;
9651 u32 magic, csum;
9652
9653 rc = bnx2x_nvram_read(bp, 0, data, 4);
9654 if (rc) {
f5372251 9655 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
9656 goto test_nvram_exit;
9657 }
9658
9659 magic = be32_to_cpu(buf[0]);
9660 if (magic != 0x669955aa) {
9661 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9662 rc = -ENODEV;
9663 goto test_nvram_exit;
9664 }
9665
9666 for (i = 0; nvram_tbl[i].size; i++) {
9667
9668 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9669 nvram_tbl[i].size);
9670 if (rc) {
9671 DP(NETIF_MSG_PROBE,
f5372251 9672 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
9673 goto test_nvram_exit;
9674 }
9675
9676 csum = ether_crc_le(nvram_tbl[i].size, data);
9677 if (csum != CRC32_RESIDUAL) {
9678 DP(NETIF_MSG_PROBE,
9679 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9680 rc = -ENODEV;
9681 goto test_nvram_exit;
9682 }
9683 }
9684
9685test_nvram_exit:
9686 return rc;
9687}
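
/* A standalone sketch (not driver code) of the CRC32_RESIDUAL check
 * above: each NVRAM block stores its CRC-32 in its last four bytes, so
 * running the reflected CRC-32 register (as ether_crc_le() does, with
 * no final inversion) across data plus stored CRC lands on the constant
 * residue 0xdebb20e3 whenever the block is intact.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

int main(void)
{
	uint8_t block[16] = "123456789";	/* 12 data bytes + 4 CRC bytes */
	uint32_t crc = ~crc32_le(0xffffffffu, block, 12);

	/* append the CRC least significant byte first, the order that
	 * makes the residue property hold */
	block[12] = (uint8_t)crc;
	block[13] = (uint8_t)(crc >> 8);
	block[14] = (uint8_t)(crc >> 16);
	block[15] = (uint8_t)(crc >> 24);

	printf("residual 0x%08x\n", crc32_le(0xffffffffu, block, 16));
	return 0;	/* prints "residual 0xdebb20e3" */
}
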
9688
9689static int bnx2x_test_intr(struct bnx2x *bp)
9690{
9691 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9692 int i, rc;
9693
9694 if (!netif_running(bp->dev))
9695 return -ENODEV;
9696
8d9c5f34 9697 config->hdr.length = 0;
af246401
EG
9698 if (CHIP_IS_E1(bp))
9699 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9700 else
9701 config->hdr.offset = BP_FUNC(bp);
0626b899 9702 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
9703 config->hdr.reserved1 = 0;
9704
9705 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9706 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9707 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9708 if (rc == 0) {
9709 bp->set_mac_pending++;
9710 for (i = 0; i < 10; i++) {
9711 if (!bp->set_mac_pending)
9712 break;
9713 msleep_interruptible(10);
9714 }
9715 if (i == 10)
9716 rc = -ENODEV;
9717 }
9718
9719 return rc;
9720}
9721
a2fbb9ea
ET
9722static void bnx2x_self_test(struct net_device *dev,
9723 struct ethtool_test *etest, u64 *buf)
9724{
9725 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
9726
9727 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9728
f3c87cdd 9729 if (!netif_running(dev))
a2fbb9ea 9730 return;
a2fbb9ea 9731
33471629 9732 /* offline tests are not supported in MF mode */
f3c87cdd
YG
9733 if (IS_E1HMF(bp))
9734 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9735
9736 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
9737 int port = BP_PORT(bp);
9738 u32 val;
f3c87cdd
YG
9739 u8 link_up;
9740
279abdf5
EG
9741 /* save current value of input enable for TX port IF */
9742 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
9743 /* disable input for TX port IF */
9744 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
9745
f3c87cdd
YG
9746 link_up = bp->link_vars.link_up;
9747 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9748 bnx2x_nic_load(bp, LOAD_DIAG);
9749 /* wait until link state is restored */
9750 bnx2x_wait_for_link(bp, link_up);
9751
9752 if (bnx2x_test_registers(bp) != 0) {
9753 buf[0] = 1;
9754 etest->flags |= ETH_TEST_FL_FAILED;
9755 }
9756 if (bnx2x_test_memory(bp) != 0) {
9757 buf[1] = 1;
9758 etest->flags |= ETH_TEST_FL_FAILED;
9759 }
9760 buf[2] = bnx2x_test_loopback(bp, link_up);
9761 if (buf[2] != 0)
9762 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 9763
f3c87cdd 9764 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
9765
9766 /* restore input for TX port IF */
9767 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
9768
f3c87cdd
YG
9769 bnx2x_nic_load(bp, LOAD_NORMAL);
9770 /* wait until link state is restored */
9771 bnx2x_wait_for_link(bp, link_up);
9772 }
9773 if (bnx2x_test_nvram(bp) != 0) {
9774 buf[3] = 1;
a2fbb9ea
ET
9775 etest->flags |= ETH_TEST_FL_FAILED;
9776 }
f3c87cdd
YG
9777 if (bnx2x_test_intr(bp) != 0) {
9778 buf[4] = 1;
9779 etest->flags |= ETH_TEST_FL_FAILED;
9780 }
9781 if (bp->port.pmf)
9782 if (bnx2x_link_test(bp) != 0) {
9783 buf[5] = 1;
9784 etest->flags |= ETH_TEST_FL_FAILED;
9785 }
f3c87cdd
YG
9786
9787#ifdef BNX2X_EXTRA_DEBUG
9788 bnx2x_panic_dump(bp);
9789#endif
a2fbb9ea
ET
9790}
9791
de832a55
EG
9792static const struct {
9793 long offset;
9794 int size;
9795 u8 string[ETH_GSTRING_LEN];
9796} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9797/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9798 { Q_STATS_OFFSET32(error_bytes_received_hi),
9799 8, "[%d]: rx_error_bytes" },
9800 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9801 8, "[%d]: rx_ucast_packets" },
9802 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9803 8, "[%d]: rx_mcast_packets" },
9804 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9805 8, "[%d]: rx_bcast_packets" },
9806 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9807 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9808 4, "[%d]: rx_phy_ip_err_discards"},
9809 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9810 4, "[%d]: rx_skb_alloc_discard" },
9811 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9812
9813/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9814 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9815 8, "[%d]: tx_packets" }
9816};
9817
bb2a0f7a
YG
9818static const struct {
9819 long offset;
9820 int size;
9821 u32 flags;
66e855f3
YG
9822#define STATS_FLAGS_PORT 1
9823#define STATS_FLAGS_FUNC 2
de832a55 9824#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 9825 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 9826} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
9827/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9828 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 9829 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 9830 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 9831 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 9832 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 9833 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 9834 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 9835 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 9836 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 9837 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9838 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9839 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9840 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
9841 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9842 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9843 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9844 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9845/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9846 8, STATS_FLAGS_PORT, "rx_fragments" },
9847 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9848 8, STATS_FLAGS_PORT, "rx_jabbers" },
9849 { STATS_OFFSET32(no_buff_discard_hi),
9850 8, STATS_FLAGS_BOTH, "rx_discards" },
9851 { STATS_OFFSET32(mac_filter_discard),
9852 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9853 { STATS_OFFSET32(xxoverflow_discard),
9854 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9855 { STATS_OFFSET32(brb_drop_hi),
9856 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9857 { STATS_OFFSET32(brb_truncate_hi),
9858 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9859 { STATS_OFFSET32(pause_frames_received_hi),
9860 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9861 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9862 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9863 { STATS_OFFSET32(nig_timer_max),
9864 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9865/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9866 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9867 { STATS_OFFSET32(rx_skb_alloc_failed),
9868 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9869 { STATS_OFFSET32(hw_csum_err),
9870 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9871
9872 { STATS_OFFSET32(total_bytes_transmitted_hi),
9873 8, STATS_FLAGS_BOTH, "tx_bytes" },
9874 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9875 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9876 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9877 8, STATS_FLAGS_BOTH, "tx_packets" },
9878 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9879 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9880 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9881 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9882 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9883 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9884 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9885 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 9886/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9887 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9888 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9889 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9890 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9891 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9892 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9893 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9894 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9895 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9896 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9897 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9898 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9899 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9900 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9901 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9902 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9903 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9904 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9905 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 9906/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9907 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
9908 { STATS_OFFSET32(pause_frames_sent_hi),
9909 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
9910};
9911
de832a55
EG
9912#define IS_PORT_STAT(i) \
9913 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9914#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9915#define IS_E1HMF_MODE_STAT(bp) \
9916 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 9917
a2fbb9ea
ET
9918static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9919{
bb2a0f7a 9920 struct bnx2x *bp = netdev_priv(dev);
de832a55 9921 int i, j, k;
bb2a0f7a 9922
a2fbb9ea
ET
9923 switch (stringset) {
9924 case ETH_SS_STATS:
de832a55
EG
9925 if (is_multi(bp)) {
9926 k = 0;
9927 for_each_queue(bp, i) {
9928 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9929 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9930 bnx2x_q_stats_arr[j].string, i);
9931 k += BNX2X_NUM_Q_STATS;
9932 }
9933 if (IS_E1HMF_MODE_STAT(bp))
9934 break;
9935 for (j = 0; j < BNX2X_NUM_STATS; j++)
9936 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9937 bnx2x_stats_arr[j].string);
9938 } else {
9939 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9940 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9941 continue;
9942 strcpy(buf + j*ETH_GSTRING_LEN,
9943 bnx2x_stats_arr[i].string);
9944 j++;
9945 }
bb2a0f7a 9946 }
a2fbb9ea
ET
9947 break;
9948
9949 case ETH_SS_TEST:
9950 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9951 break;
9952 }
9953}
9954
9955static int bnx2x_get_stats_count(struct net_device *dev)
9956{
bb2a0f7a 9957 struct bnx2x *bp = netdev_priv(dev);
de832a55 9958 int i, num_stats;
bb2a0f7a 9959
de832a55
EG
9960 if (is_multi(bp)) {
9961 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9962 if (!IS_E1HMF_MODE_STAT(bp))
9963 num_stats += BNX2X_NUM_STATS;
9964 } else {
9965 if (IS_E1HMF_MODE_STAT(bp)) {
9966 num_stats = 0;
9967 for (i = 0; i < BNX2X_NUM_STATS; i++)
9968 if (IS_FUNC_STAT(i))
9969 num_stats++;
9970 } else
9971 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 9972 }
de832a55 9973
bb2a0f7a 9974 return num_stats;
a2fbb9ea
ET
9975}
9976
9977static void bnx2x_get_ethtool_stats(struct net_device *dev,
9978 struct ethtool_stats *stats, u64 *buf)
9979{
9980 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
9981 u32 *hw_stats, *offset;
9982 int i, j, k;
bb2a0f7a 9983
de832a55
EG
9984 if (is_multi(bp)) {
9985 k = 0;
9986 for_each_queue(bp, i) {
9987 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9988 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9989 if (bnx2x_q_stats_arr[j].size == 0) {
9990 /* skip this counter */
9991 buf[k + j] = 0;
9992 continue;
9993 }
9994 offset = (hw_stats +
9995 bnx2x_q_stats_arr[j].offset);
9996 if (bnx2x_q_stats_arr[j].size == 4) {
9997 /* 4-byte counter */
9998 buf[k + j] = (u64) *offset;
9999 continue;
10000 }
10001 /* 8-byte counter */
10002 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10003 }
10004 k += BNX2X_NUM_Q_STATS;
10005 }
10006 if (IS_E1HMF_MODE_STAT(bp))
10007 return;
10008 hw_stats = (u32 *)&bp->eth_stats;
10009 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10010 if (bnx2x_stats_arr[j].size == 0) {
10011 /* skip this counter */
10012 buf[k + j] = 0;
10013 continue;
10014 }
10015 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10016 if (bnx2x_stats_arr[j].size == 4) {
10017 /* 4-byte counter */
10018 buf[k + j] = (u64) *offset;
10019 continue;
10020 }
10021 /* 8-byte counter */
10022 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10023 }
de832a55
EG
10024 } else {
10025 hw_stats = (u32 *)&bp->eth_stats;
10026 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10027 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10028 continue;
10029 if (bnx2x_stats_arr[i].size == 0) {
10030 /* skip this counter */
10031 buf[j] = 0;
10032 j++;
10033 continue;
10034 }
10035 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10036 if (bnx2x_stats_arr[i].size == 4) {
10037 /* 4-byte counter */
10038 buf[j] = (u64) *offset;
10039 j++;
10040 continue;
10041 }
10042 /* 8-byte counter */
10043 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10044 j++;
a2fbb9ea 10045 }
a2fbb9ea
ET
10046 }
10047}
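
/* A standalone sketch (not driver code) of the 8-byte counter handling
 * above: wide statistics live as {hi, lo} pairs of 32-bit words, and
 * HILO_U64() splices them into one 64-bit value for ethtool. The sample
 * counter is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t hilo_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;	/* hi word first, as in the stats layout */
}

int main(void)
{
	/* hi = 0x1, lo = 0x2345 -> 0x100002345 */
	printf("0x%llx\n", (unsigned long long)hilo_u64(0x1, 0x2345));
	return 0;
}
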
10048
10049static int bnx2x_phys_id(struct net_device *dev, u32 data)
10050{
10051 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10052 int port = BP_PORT(bp);
a2fbb9ea
ET
10053 int i;
10054
34f80b04
EG
10055 if (!netif_running(dev))
10056 return 0;
10057
10058 if (!bp->port.pmf)
10059 return 0;
10060
a2fbb9ea
ET
10061 if (data == 0)
10062 data = 2;
10063
10064 for (i = 0; i < (data * 2); i++) {
c18487ee 10065 if ((i % 2) == 0)
34f80b04 10066 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
10067 bp->link_params.hw_led_mode,
10068 bp->link_params.chip_id);
10069 else
34f80b04 10070 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
10071 bp->link_params.hw_led_mode,
10072 bp->link_params.chip_id);
10073
a2fbb9ea
ET
10074 msleep_interruptible(500);
10075 if (signal_pending(current))
10076 break;
10077 }
10078
c18487ee 10079 if (bp->link_vars.link_up)
34f80b04 10080 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
10081 bp->link_vars.line_speed,
10082 bp->link_params.hw_led_mode,
10083 bp->link_params.chip_id);
a2fbb9ea
ET
10084
10085 return 0;
10086}
10087
10088static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
10089 .get_settings = bnx2x_get_settings,
10090 .set_settings = bnx2x_set_settings,
10091 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
10092 .get_regs_len = bnx2x_get_regs_len,
10093 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
10094 .get_wol = bnx2x_get_wol,
10095 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
10096 .get_msglevel = bnx2x_get_msglevel,
10097 .set_msglevel = bnx2x_set_msglevel,
10098 .nway_reset = bnx2x_nway_reset,
01e53298 10099 .get_link = bnx2x_get_link,
7a9b2557
VZ
10100 .get_eeprom_len = bnx2x_get_eeprom_len,
10101 .get_eeprom = bnx2x_get_eeprom,
10102 .set_eeprom = bnx2x_set_eeprom,
10103 .get_coalesce = bnx2x_get_coalesce,
10104 .set_coalesce = bnx2x_set_coalesce,
10105 .get_ringparam = bnx2x_get_ringparam,
10106 .set_ringparam = bnx2x_set_ringparam,
10107 .get_pauseparam = bnx2x_get_pauseparam,
10108 .set_pauseparam = bnx2x_set_pauseparam,
10109 .get_rx_csum = bnx2x_get_rx_csum,
10110 .set_rx_csum = bnx2x_set_rx_csum,
10111 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10112 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
10113 .set_flags = bnx2x_set_flags,
10114 .get_flags = ethtool_op_get_flags,
10115 .get_sg = ethtool_op_get_sg,
10116 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
10117 .get_tso = ethtool_op_get_tso,
10118 .set_tso = bnx2x_set_tso,
10119 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
10120 .self_test = bnx2x_self_test,
10121 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
10122 .phys_id = bnx2x_phys_id,
10123 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 10124 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
10125};
10126
10127/* end of ethtool_ops */
10128
10129/****************************************************************************
10130* General service functions
10131****************************************************************************/
10132
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
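
/* Illustration (not part of the driver): the PMCSR power-state field is
 * the low two bits of the register (0 = D0, 3 = D3hot).  A minimal
 * standalone sketch of the masking done above, with an assumed starting
 * register value; build it separately as userspace C.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t pmcsr = 0x4100;	/* assumed current PMCSR contents */

	pmcsr &= ~0x0003;	/* PCI_PM_CTRL_STATE_MASK: clear state bits */
	pmcsr |= 3;		/* request D3hot */
	printf("pmcsr -> 0x%04x\n", pmcsr);	/* 0x4103 */
	return 0;
}
#endif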

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
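
/* Illustration (not part of the driver): the bump above skips the last
 * entry of each RCQ page, which holds the next-page pointer rather than
 * a completion.  A minimal standalone sketch of the same index math; the
 * mask value is illustrative, the real one comes from bnx2x.h.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_RCQ_DESC_CNT 127	/* 128 entries/page, last one reserved */

static uint16_t demo_next_rcq_idx(uint16_t sw_cons)
{
	if ((sw_cons & DEMO_RCQ_DESC_CNT) == DEMO_RCQ_DESC_CNT)
		sw_cons++;	/* hop over the reserved entry */
	return sw_cons;
}

int main(void)
{
	printf("126 -> %u\n", demo_next_rcq_idx(126));	/* unchanged */
	printf("127 -> %u\n", demo_next_rcq_idx(127));	/* skips to 128 */
	return 0;
}
#endif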

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* BNX2X_HAS_WORK() reads the status block, so we must make sure
	 * the status block indices have actually been read
	 * (bnx2x_update_fpsb_idx) before this check.  Without the rmb()
	 * the read may be postponed to right before bnx2x_ack_sb(); if a
	 * DMA updated the status block in between, we would ack the
	 * "newer" index to the IGU and no further interrupt would arrive
	 * until the next status block update, even though work is still
	 * pending.
	 */
	rmb();

	if (!BNX2X_HAS_WORK(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}


/* We split the first BD into a header BD and a data BD
 * to ease the pain of our fellow microcode engineers;
 * both BDs share one DMA mapping.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping;
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}
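
/* Illustration (not part of the driver): bnx2x_tx_split() points two BDs
 * at a single DMA mapping - the header BD keeps the original address with
 * nbytes = hlen, and the data BD starts hlen bytes further in.  A minimal
 * standalone sketch of that address split, with made-up demo values.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mapping = 0x12345000;	/* assumed DMA address */
	uint16_t old_len = 1514, hlen = 54;

	printf("header BD @0x%llx len %u\n",
	       (unsigned long long)mapping, hlen);
	printf("data BD   @0x%llx len %u\n",
	       (unsigned long long)(mapping + hlen), old_len - hlen);
	return 0;
}
#endif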

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
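
/* Illustration (not part of the driver): the fix above relies on the
 * one's-complement sum being associative - the sum over a buffer equals
 * the folded sum of its parts, so a span can be removed from (or added
 * to) a checksum by subtracting (or adding) its partial sum.  A minimal
 * standalone sketch with a naive csum_partial stand-in; demo bytes only.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint16_t demo_csum(const uint8_t *buf, int len, uint32_t sum)
{
	int i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (buf[i] << 8) | buf[i + 1];
	if (len & 1)
		sum += buf[len - 1] << 8;
	while (sum >> 16)			/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t pkt[6] = { 0x45, 0x00, 0x00, 0x1c, 0xab, 0xcd };
	uint32_t parts = demo_csum(pkt, 2, 0) + demo_csum(pkt + 2, 4, 0);

	while (parts >> 16)
		parts = (parts & 0xffff) + (parts >> 16);
	/* both lines print f0e9 */
	printf("whole: %04x\n", demo_csum(pkt, 6, 0));
	printf("parts: %04x\n", (uint16_t)parts);
	return 0;
}
#endif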

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
				  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* a non-LSO packet that is too fragmented must
			   always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
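
/* Illustration (not part of the driver): the sliding-window test above
 * requires every wnd_size consecutive BDs to carry at least one full MSS,
 * otherwise the FW cannot fetch a whole segment and the skb must be
 * linearized.  A minimal standalone sketch of the same check; the frag
 * sizes, window size and MSS are made-up demo values.
 */
#if 0
#include <stdio.h>

static int demo_needs_linearize(const int *frag, int nfrags,
				int wnd_size, int lso_mss)
{
	int wnd_sum = 0, i;

	for (i = 0; i < wnd_size && i < nfrags; i++)
		wnd_sum += frag[i];

	for (i = 0; i + wnd_size <= nfrags; i++) {
		if (wnd_sum < lso_mss)
			return 1;	/* some window is short of an MSS */
		wnd_sum -= frag[i];
		if (i + wnd_size < nfrags)
			wnd_sum += frag[i + wnd_size];
	}
	return 0;
}

int main(void)
{
	int frags[6] = { 600, 600, 100, 100, 100, 600 };

	/* first window 600+600+100 = 1300 < 1460 -> linearize */
	printf("needs linearize: %d\n",
	       demo_needs_linearize(frags, 6, 3, 1460));
	return 0;
}
#endif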

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	   Please read carefully. First we use one BD which we mark as start,
	   then, for TSO or checksum offload, we have a parsing-info BD,
	   and only then the rest of the data BDs.
	   (don't forget to mark the last one as last,
	   and to unmap only AFTER you write to the BD ...)
	   And above all, all PBD sizes are in words - NOT DWORDS!
	 */
10484
10485 pkt_prod = fp->tx_pkt_prod++;
755735eb 10486 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10487
755735eb 10488 /* get a tx_buf and first BD */
a2fbb9ea
ET
10489 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10490 tx_bd = &fp->tx_desc_ring[bd_prod];
10491
10492 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10493 tx_bd->general_data = (UNICAST_ADDRESS <<
10494 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a
EG
10495 /* header nbd */
10496 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10497
755735eb
EG
10498 /* remember the first BD of the packet */
10499 tx_buf->first_bd = fp->tx_bd_prod;
10500 tx_buf->skb = skb;
a2fbb9ea
ET
10501
10502 DP(NETIF_MSG_TX_QUEUED,
10503 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10504 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10505
0c6671b0
EG
10506#ifdef BCM_VLAN
10507 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10508 (bp->flags & HW_VLAN_TX_FLAG)) {
755735eb
EG
10509 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10510 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10511 vlan_off += 4;
10512 } else
0c6671b0 10513#endif
755735eb 10514 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10515
755735eb 10516 if (xmit_type) {
755735eb 10517 /* turn on parsing and get a BD */
a2fbb9ea
ET
10518 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10519 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
10520
10521 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10522 }
10523
10524 if (xmit_type & XMIT_CSUM) {
10525 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
10526
10527 /* for now NS flag is not used in Linux */
4781bfad
EG
10528 pbd->global_data =
10529 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10530 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 10531
755735eb
EG
10532 pbd->ip_hlen = (skb_transport_header(skb) -
10533 skb_network_header(skb)) / 2;
10534
10535 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 10536
755735eb
EG
10537 pbd->total_hlen = cpu_to_le16(hlen);
10538 hlen = hlen*2 - vlan_off;
a2fbb9ea 10539
755735eb
EG
10540 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10541
10542 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 10543 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
10544 ETH_TX_BD_FLAGS_IP_CSUM;
10545 else
10546 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10547
10548 if (xmit_type & XMIT_CSUM_TCP) {
10549 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10550
10551 } else {
10552 s8 fix = SKB_CS_OFF(skb); /* signed! */
10553
a2fbb9ea 10554 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 10555 pbd->cs_offset = fix / 2;
a2fbb9ea 10556
755735eb
EG
10557 DP(NETIF_MSG_TX_QUEUED,
10558 "hlen %d offset %d fix %d csum before fix %x\n",
10559 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10560 SKB_CS(skb));
10561
10562 /* HW bug: fixup the CSUM */
10563 pbd->tcp_pseudo_csum =
10564 bnx2x_csum_fix(skb_transport_header(skb),
10565 SKB_CS(skb), fix);
10566
10567 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10568 pbd->tcp_pseudo_csum);
10569 }
a2fbb9ea
ET
10570 }
10571
10572 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 10573 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
10574
10575 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10576 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 10577 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
a2fbb9ea
ET
10578 tx_bd->nbd = cpu_to_le16(nbd);
10579 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10580
10581 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
10582 " nbytes %d flags %x vlan %x\n",
10583 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10584 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10585 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 10586
755735eb 10587 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
10588
10589 DP(NETIF_MSG_TX_QUEUED,
10590 "TSO packet len %d hlen %d total len %d tso size %d\n",
10591 skb->len, hlen, skb_headlen(skb),
10592 skb_shinfo(skb)->gso_size);
10593
10594 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10595
755735eb
EG
10596 if (unlikely(skb_headlen(skb) > hlen))
10597 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10598 bd_prod, ++nbd);
a2fbb9ea
ET
10599
10600 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10601 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
10602 pbd->tcp_flags = pbd_tcp_flags(skb);
10603
10604 if (xmit_type & XMIT_GSO_V4) {
10605 pbd->ip_id = swab16(ip_hdr(skb)->id);
10606 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
10607 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10608 ip_hdr(skb)->daddr,
10609 0, IPPROTO_TCP, 0));
755735eb
EG
10610
10611 } else
10612 pbd->tcp_pseudo_csum =
10613 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10614 &ipv6_hdr(skb)->daddr,
10615 0, IPPROTO_TCP, 0));
10616
a2fbb9ea
ET
10617 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10618 }
10619
755735eb
EG
10620 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10621 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 10622
755735eb
EG
10623 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10624 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 10625
755735eb
EG
10626 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10627 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 10628
755735eb
EG
10629 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10630 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10631 tx_bd->nbytes = cpu_to_le16(frag->size);
10632 tx_bd->vlan = cpu_to_le16(pkt_prod);
10633 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 10634
755735eb
EG
10635 DP(NETIF_MSG_TX_QUEUED,
10636 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10637 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10638 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
10639 }
10640
755735eb 10641 /* now at last mark the BD as the last BD */
a2fbb9ea
ET
10642 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10643
10644 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10645 tx_bd, tx_bd->bd_flags.as_bitfield);
10646
a2fbb9ea
ET
10647 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10648
755735eb 10649 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
10650 * if the packet contains or ends with it
10651 */
10652 if (TX_BD_POFF(bd_prod) < nbd)
10653 nbd++;
10654
10655 if (pbd)
10656 DP(NETIF_MSG_TX_QUEUED,
10657 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10658 " tcp_flags %x xsum %x seq %u hlen %u\n",
10659 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10660 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 10661 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 10662
755735eb 10663 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 10664
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
					cpu_to_le16(port);
				config->config_table[i].target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.client_id = 0;
				config->config_table[i].target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].cam_entry.msb_mac_addr,
				   config->config_table[i].cam_entry.middle_mac_addr,
				   config->config_table[i].cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
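
/* Illustration (not part of the driver): the E1H multicast path above
 * hashes each MAC with crc32c and uses the top CRC byte as a bit index
 * into a 256-bit filter held in 8 x 32-bit registers.  A minimal
 * standalone sketch of the index math, with an assumed CRC value in
 * place of crc32c_le(0, mac, ETH_ALEN).
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mc_filter[8] = { 0 };
	uint32_t crc = 0x9a3f12c4;	/* stand-in CRC of a MAC address */
	uint32_t bit = (crc >> 24) & 0xff;	/* 0..255 */
	uint32_t regidx = bit >> 5;		/* which register */

	bit &= 0x1f;				/* bit within register */
	mc_filter[regidx] |= (1u << bit);

	printf("reg %u bit %u -> 0x%08x\n", regidx, bit, mc_filter[regidx]);
	return 0;
}
#endif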

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u16 *ops_offsets;
	u32 offset, len, num_ops;
	int i;
	const struct firmware *firmware = bp->firmware;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
		       BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	u32 i, j, tmp;
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
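
/* Illustration (not part of the driver): each init op is a pair of
 * big-endian 32-bit words; the first packs the opcode into the top 8 bits
 * and the offset into the low 24, exactly as unpacked above.  A minimal
 * standalone sketch with a demo word.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t word0 = 0x02001234;	/* demo value, already byte-swapped */
	uint8_t op = (word0 >> 24) & 0xff;
	uint32_t offset = word0 & 0xffffff;

	printf("op %u offset 0x%06x\n", op, offset);	/* op 2, 0x001234 */
	return 0;
}
#endif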

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	u16 *target = (u16 *)_target;
	const __be16 *source = (const __be16 *)_source;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + \
		     be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)


static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);

	/* STORMs firmware */
	bp->tsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	bp->tsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	bp->usem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	bp->usem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	bp->xsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	bp->xsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	bp->csem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	bp->csem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
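
/* Illustration (not part of the driver): the name built above is the
 * prefix plus the dotted FW version, e.g. "bnx2x-e1-4.8.53.0.fw" (the
 * version digits here are demo values, not necessarily the ones this
 * tree requires).  A minimal standalone sketch.
 */
#if 0
#include <stdio.h>

int main(void)
{
	char name[40];
	int off = sprintf(name, "bnx2x-e1-");	/* FW_FILE_PREFIX_E1 */

	sprintf(name + off, "%d.%d.%d.%d.fw", 4, 8, 53, 0);
	printf("%s\n", name);	/* bnx2x-e1-4.8.53.0.fw */
	return 0;
}
#endif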


static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);