/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

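/* Copy a buffer from host memory into device (GRC) address space using
 * the DMAE block.  The command is posted on the channel reserved for
 * init (INIT_DMAE_C) and completion is detected by polling the wb_comp
 * word in the slowpath area, which the chip sets to DMAE_COMP_VAL.
 * Before the DMAE block is ready (!bp->dmae_ready) the copy falls back
 * to slow indirect register writes.
 */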
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

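/* Scan the assert lists of the four storm processors (X/T/C/U) and dump
 * any entries found.  Each list entry is four consecutive 32-bit rows;
 * scanning stops at the first row holding the "invalid assert" opcode.
 * Returns the number of asserts found.
 */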
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

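/* Dump the bootcode (MCP) trace buffer from scratchpad memory to the
 * kernel log.  The "mark" word at offset 0xf104 points at the current
 * write position, so the buffer is printed in two passes to keep the
 * output in chronological order.
 */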
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

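/* Program the HC (host coalescing) interrupt configuration for this
 * port.  In MSI-X mode the single-ISR bit is cleared; in INT#A mode the
 * register is written twice, first with the MSI/MSI-X enable bit set
 * and then with it cleared.  On E1H the leading/trailing edge registers
 * are also set up so attentions reach this function.
 */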
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

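/* Refresh the driver's cached copy of the fastpath status block
 * indices.  The return value is a bitmask: bit 0 is set if the CSTORM
 * index changed and bit 1 if the USTORM index changed, so a non-zero
 * result means the chip has reported new work for this queue.
 */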
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

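/* Number of free Tx BDs.  The NUM_TX_RINGS "next page" BDs can never
 * carry data, so they are counted as used before subtracting from the
 * ring size; this keeps the estimate conservative.
 */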
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

a2fbb9ea
ET
907static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
908 union eth_rx_cqe *rr_cqe)
909{
910 struct bnx2x *bp = fp->bp;
911 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
912 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
913
34f80b04 914 DP(BNX2X_MSG_SP,
a2fbb9ea 915 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
34f80b04
EG
916 FP_IDX(fp), cid, command, bp->state,
917 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
918
919 bp->spq_left++;
920
34f80b04 921 if (FP_IDX(fp)) {
a2fbb9ea
ET
922 switch (command | fp->state) {
923 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
924 BNX2X_FP_STATE_OPENING):
925 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
926 cid);
927 fp->state = BNX2X_FP_STATE_OPEN;
928 break;
929
930 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
931 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
932 cid);
933 fp->state = BNX2X_FP_STATE_HALTED;
934 break;
935
936 default:
34f80b04
EG
937 BNX2X_ERR("unexpected MC reply (%d) "
938 "fp->state is %x\n", command, fp->state);
939 break;
a2fbb9ea 940 }
34f80b04 941 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
942 return;
943 }
c14423fe 944
a2fbb9ea
ET
945 switch (command | bp->state) {
946 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
947 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
948 bp->state = BNX2X_STATE_OPEN;
949 break;
950
951 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
952 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
953 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
954 fp->state = BNX2X_FP_STATE_HALTED;
955 break;
956
a2fbb9ea 957 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 958 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 959 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
960 break;
961
3196a88a 962
a2fbb9ea 963 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 964 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 965 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
bb2a0f7a 966 bp->set_mac_pending = 0;
a2fbb9ea
ET
967 break;
968
49d66772 969 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 970 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
49d66772
ET
971 break;
972
a2fbb9ea 973 default:
34f80b04 974 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 975 command, bp->state);
34f80b04 976 break;
a2fbb9ea 977 }
34f80b04 978 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
979}
980
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

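/* The SGE ring is tracked with a bitmask, one bit per SGE, so that
 * pages consumed out of order by TPA aggregations can be accounted
 * for.  Bits are cleared as the firmware reports pages used, and the
 * producer is advanced only over fully-cleared 64-bit mask elements.
 */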
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

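/* Begin a TPA (LRO) aggregation in the given bin: an empty skb from
 * the pool replaces the ring entry at prod, while the skb holding the
 * first part of the aggregated packet is parked in the pool until the
 * TPA_END completion arrives.
 */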
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

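/* Complete a TPA aggregation: unmap the parked skb, fix up the IP
 * checksum of the aggregated header, attach the SGE pages as fragments
 * and hand the packet to the stack.  A replacement skb is allocated
 * for the pool; if that fails the aggregated packet is dropped and the
 * buffer stays in the bin.
 */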
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}

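/* NAPI Rx poll loop: walk the completion queue until it is empty or
 * the budget is exhausted.  Each CQE is either a slowpath event, a TPA
 * start/end marker or a regular packet; since there is no jumbo ring,
 * small packets are copied into a fresh skb when a large MTU is in use
 * so the original buffer can be recycled in place.
 */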
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

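/* Legacy INT#A handler.  The IGU returns a status bitmask: one bit per
 * fastpath status block (0x2 << sb_id) and bit 0 for the default
 * (slowpath) status block, which is deferred to the sp_task workqueue.
 */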
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

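/* Acquire one of the driver/firmware hardware resource locks.  Each
 * lock is a bit in the per-function MISC_REG_DRIVER_CONTROL registers;
 * the acquire is retried every 5ms for up to 5 seconds before giving
 * up with -EAGAIN.
 */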
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

4a37fb66 1772static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1773{
1774 u32 lock_status;
1775 u32 resource_bit = (1 << resource);
4a37fb66
YG
1776 int func = BP_FUNC(bp);
1777 u32 hw_lock_control_reg;
a2fbb9ea 1778
c18487ee
YR
1779 /* Validating that the resource is within range */
1780 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1781 DP(NETIF_MSG_HW,
1782 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1783 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1784 return -EINVAL;
1785 }
1786
4a37fb66
YG
1787 if (func <= 5) {
1788 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1789 } else {
1790 hw_lock_control_reg =
1791 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1792 }
1793
c18487ee 1794 /* Validating that the resource is currently taken */
4a37fb66 1795 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1796 if (!(lock_status & resource_bit)) {
1797 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1798 lock_status, resource_bit);
1799 return -EFAULT;
a2fbb9ea
ET
1800 }
1801
4a37fb66 1802 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1803 return 0;
1804}
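
/*
 * Typical usage of the acquire/release pair above, mirroring the GPIO
 * and SPIO helpers below (sketch only; the register access in the
 * middle is a placeholder):
 */
#if 0	/* illustrative sketch only - not compiled */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* ... access the shared resource, e.g. REG_WR(bp, MISC_REG_GPIO, ...) */
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
#endif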

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
   In the latter case fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will
   be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
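
/*
 * Worked example for the weight sum above (the configured rates are
 * assumptions): with two visible VNs whose min BW fields decode to 25
 * and 0, the zero entry is bumped to DEF_MIN_RATE, giving
 * wsum = 25*100 + DEF_MIN_RATE and leaving fairness enabled.  Only
 * when every visible VN decodes to 0 does the function return 0, and
 * the caller then disables the fairness algorithm.
 */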

static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   1.25 coefficient is for the threshold to be a little bigger
		   than the real time, to compensate for timer inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}
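
/*
 * Worked numbers for the conversions above, assuming a 10000 Mbps
 * port_rate: r_param = 10000/8 = 1250 bytes/usec, the rate-shaping
 * timeout of RS_PERIODIC_TIMEOUT_USEC = 100 usec becomes 100/4 = 25
 * SDM ticks, and t_fair = T_FAIR_COEF/10000 yields the 1000 usec
 * fairness period the comment above quotes for 10G.
 */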

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			  ((double)m_rs_vn.
				   protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	do {
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	} while (0);
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
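
/*
 * Worked example for vn_credit_delta (the rates are assumptions): with
 * vn_min_rate = 100 and wsum = 400 the first max() operand is
 * 100 * (T_FAIR_COEF / 3200) bytes per fairness period; taking the
 * max() against twice fair_threshold guarantees that even a VN with a
 * tiny share earns at least one arbitration quantum per period.
 */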

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	int vn;

	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int func;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | BP_PORT(bp));

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}
	}

	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int port = BP_PORT(bp);

		/* Init RATE SHAPING and FAIRNESS contexts */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum,
					bp->link_vars.line_speed,
					&m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, bp->link_vars.line_speed,
					&m_cmng_port);
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
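
/*
 * Sketch of a caller (it mirrors bnx2x_storm_stats_post() further
 * down; the data words here are placeholders): a ramrod is posted by
 * command and cid, and its completion arrives on the fastpath ring.
 */
#if 0	/* illustrative sketch only - not compiled */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
			   data_hi, data_lo, 0);
	if (rc)		/* ring full or panic - caller must back off */
		return rc;
#endif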

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
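
/*
 * The returned bitmask encodes which default status block indices
 * moved: bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM.  bnx2x_sp_task() below tests
 * (status & 0x1) for attentions and (status & 0x2) for CStorm events.
 */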

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage.  Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
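
/*
 * Bit-level example for the derivation above (the sample values are
 * assumptions): with attn_bits = 0101b, attn_ack = 0011b and
 * attn_state = 0011b, asserted = 0101 & ~0011 & ~0011 = 0100b (newly
 * raised, not yet acked or tracked) and deasserted =
 * ~0101 & 0011 & 0011 = 0010b (acked and tracked, now dropped).
 */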

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* CStorm events: query_stats, port delete ramrod */
	if (status & 0x2)
		bp->stats_pending = 0;

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'borrow' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
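
/*
 * Worked example for the split-64 helpers above (operands are
 * arbitrary): adding a_lo = 0xffffffff to s_lo = 0x1 wraps the low
 * word to 0x0, so ADD_64 sees (s_lo < a_lo) after the wrap and
 * carries 1 into s_hi; DIFF_64 mirrors this with a borrow when
 * m_lo < s_lo, and clamps the result to 0:0 whenever the subtrahend
 * exceeds the minuend.
 */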

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
	memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
a2fbb9ea 3459
bb2a0f7a
YG
3460static void bnx2x_stats_start(struct bnx2x *bp)
3461{
3462 if (bp->port.pmf)
3463 bnx2x_port_stats_init(bp);
3464
3465 else if (bp->func_stx)
3466 bnx2x_func_stats_init(bp);
3467
3468 bnx2x_hw_stats_post(bp);
3469 bnx2x_storm_stats_post(bp);
3470}
3471
3472static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3473{
3474 bnx2x_stats_comp(bp);
3475 bnx2x_stats_pmf_update(bp);
3476 bnx2x_stats_start(bp);
3477}
3478
3479static void bnx2x_stats_restart(struct bnx2x *bp)
3480{
3481 bnx2x_stats_comp(bp);
3482 bnx2x_stats_start(bp);
3483}
3484
3485static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3486{
3487 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3488 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3489 struct regpair diff;
3490
3491 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3492 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3493 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3494 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3495 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3496 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3497 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a
YG
3498 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3499 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3500 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3501 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3502 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3503 UPDATE_STAT64(tx_stat_gt127,
3504 tx_stat_etherstatspkts65octetsto127octets);
3505 UPDATE_STAT64(tx_stat_gt255,
3506 tx_stat_etherstatspkts128octetsto255octets);
3507 UPDATE_STAT64(tx_stat_gt511,
3508 tx_stat_etherstatspkts256octetsto511octets);
3509 UPDATE_STAT64(tx_stat_gt1023,
3510 tx_stat_etherstatspkts512octetsto1023octets);
3511 UPDATE_STAT64(tx_stat_gt1518,
3512 tx_stat_etherstatspkts1024octetsto1522octets);
3513 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3514 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3515 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3516 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3517 UPDATE_STAT64(tx_stat_gterr,
3518 tx_stat_dot3statsinternalmactransmiterrors);
3519 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3520}
3521
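/* Illustrative sketch (not the driver's actual macro bodies): the
 * UPDATE_STAT64() calls above fold wrapping hardware MAC counters into
 * 64-bit hi/lo pairs kept in host memory. The core technique is delta
 * accumulation with an explicit carry, roughly:
 */
static inline void example_add_extend_64(u32 *s_hi, u32 *s_lo, u32 delta)
{
	u32 lo = *s_lo + delta;

	if (lo < *s_lo)		/* 32-bit wrap => carry into high word */
		(*s_hi)++;
	*s_lo = lo;
}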
3522static void bnx2x_emac_stats_update(struct bnx2x *bp)
3523{
3524 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3525 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3526
3527 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3528 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3529 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3530 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3531 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3532 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3533 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3534 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3535 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3536 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3537 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3538 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3539 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3540 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3541 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3542 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3543 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3544 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3545 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3546 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3547 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3548 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3549 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3550 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3551 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3552 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3553 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3554 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3555 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3556 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3557 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3558}
3559
3560static int bnx2x_hw_stats_update(struct bnx2x *bp)
3561{
3562 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3563 struct nig_stats *old = &(bp->port.old_nig_stats);
3564 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3565 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3566 struct regpair diff;
3567
3568 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3569 bnx2x_bmac_stats_update(bp);
3570
3571 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3572 bnx2x_emac_stats_update(bp);
3573
3574 else { /* unreached */
3575 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3576 return -1;
3577 }
a2fbb9ea 3578
bb2a0f7a
YG
3579 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3580 new->brb_discard - old->brb_discard);
66e855f3
YG
3581 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3582 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3583
bb2a0f7a
YG
3584 UPDATE_STAT64_NIG(egress_mac_pkt0,
3585 etherstatspkts1024octetsto1522octets);
3586 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3587
bb2a0f7a 3588 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3589
bb2a0f7a
YG
3590 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3591 sizeof(struct mac_stx));
3592 estats->brb_drop_hi = pstats->brb_drop_hi;
3593 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3594
bb2a0f7a 3595 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3596
bb2a0f7a 3597 return 0;
a2fbb9ea
ET
3598}
3599
bb2a0f7a 3600static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3601{
3602 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a
YG
3603 int cl_id = BP_CL_ID(bp);
3604 struct tstorm_per_port_stats *tport =
3605 &stats->tstorm_common.port_statistics;
a2fbb9ea 3606 struct tstorm_per_client_stats *tclient =
bb2a0f7a 3607 &stats->tstorm_common.client_statistics[cl_id];
a2fbb9ea 3608 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
bb2a0f7a
YG
3609 struct xstorm_per_client_stats *xclient =
3610 &stats->xstorm_common.client_statistics[cl_id];
3611 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3612 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3613 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3614 u32 diff;
3615
bb2a0f7a
YG
3616 /* are storm stats valid? */
3617 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3618 bp->stats_counter) {
3619 DP(BNX2X_MSG_STATS, "stats not updated by tstorm:"
3620 " tstorm counter (%d) != stats_counter (%d)\n",
3621 tclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3622 return -1;
3623 }
bb2a0f7a
YG
3624 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3625 bp->stats_counter) {
3626 DP(BNX2X_MSG_STATS, "stats not updated by xstorm:"
3627 " xstorm counter (%d) != stats_counter (%d)\n",
3628 xclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3629 return -2;
3630 }
a2fbb9ea 3631
bb2a0f7a
YG
3632 fstats->total_bytes_received_hi =
3633 fstats->valid_bytes_received_hi =
a2fbb9ea 3634 le32_to_cpu(tclient->total_rcv_bytes.hi);
bb2a0f7a
YG
3635 fstats->total_bytes_received_lo =
3636 fstats->valid_bytes_received_lo =
a2fbb9ea 3637 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a
YG
3638
3639 estats->error_bytes_received_hi =
3640 le32_to_cpu(tclient->rcv_error_bytes.hi);
3641 estats->error_bytes_received_lo =
3642 le32_to_cpu(tclient->rcv_error_bytes.lo);
3643 ADD_64(estats->error_bytes_received_hi,
3644 estats->rx_stat_ifhcinbadoctets_hi,
3645 estats->error_bytes_received_lo,
3646 estats->rx_stat_ifhcinbadoctets_lo);
3647
3648 ADD_64(fstats->total_bytes_received_hi,
3649 estats->error_bytes_received_hi,
3650 fstats->total_bytes_received_lo,
3651 estats->error_bytes_received_lo);
3652
3653 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
a2fbb9ea 3654 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
bb2a0f7a 3655 total_multicast_packets_received);
a2fbb9ea 3656 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
bb2a0f7a
YG
3657 total_broadcast_packets_received);
3658
3659 fstats->total_bytes_transmitted_hi =
3660 le32_to_cpu(xclient->total_sent_bytes.hi);
3661 fstats->total_bytes_transmitted_lo =
3662 le32_to_cpu(xclient->total_sent_bytes.lo);
3663
3664 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3665 total_unicast_packets_transmitted);
3666 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3667 total_multicast_packets_transmitted);
3668 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3669 total_broadcast_packets_transmitted);
3670
3671 memcpy(estats, &(fstats->total_bytes_received_hi),
3672 sizeof(struct host_func_stats) - 2*sizeof(u32));
3673
3674 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3675 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3676 estats->brb_truncate_discard =
3677 le32_to_cpu(tport->brb_truncate_discard);
3678 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3679
3680 old_tclient->rcv_unicast_bytes.hi =
a2fbb9ea 3681 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
bb2a0f7a 3682 old_tclient->rcv_unicast_bytes.lo =
a2fbb9ea 3683 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
bb2a0f7a 3684 old_tclient->rcv_broadcast_bytes.hi =
a2fbb9ea 3685 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
bb2a0f7a 3686 old_tclient->rcv_broadcast_bytes.lo =
a2fbb9ea 3687 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
bb2a0f7a 3688 old_tclient->rcv_multicast_bytes.hi =
a2fbb9ea 3689 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
bb2a0f7a 3690 old_tclient->rcv_multicast_bytes.lo =
a2fbb9ea 3691 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
bb2a0f7a 3692 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
a2fbb9ea 3693
bb2a0f7a
YG
3694 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3695 old_tclient->packets_too_big_discard =
a2fbb9ea 3696 le32_to_cpu(tclient->packets_too_big_discard);
bb2a0f7a
YG
3697 estats->no_buff_discard =
3698 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3699 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3700
3701 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3702 old_xclient->unicast_bytes_sent.hi =
3703 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3704 old_xclient->unicast_bytes_sent.lo =
3705 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3706 old_xclient->multicast_bytes_sent.hi =
3707 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3708 old_xclient->multicast_bytes_sent.lo =
3709 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3710 old_xclient->broadcast_bytes_sent.hi =
3711 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3712 old_xclient->broadcast_bytes_sent.lo =
3713 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3714
3715 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea
ET
3716
3717 return 0;
3718}
3719
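/* Illustrative sketch: the validity checks at the top of
 * bnx2x_storm_stats_update() are a sequence-number handshake with the
 * firmware. The driver bumps bp->stats_counter when it posts a stats
 * query, and each storm stamps the counter it served into its
 * per-client block; the snapshot is fresh only if stamp + 1 equals the
 * driver's counter, modulo u16 wraparound. Condensed (hypothetical
 * helper):
 */
static inline int example_storm_stats_fresh(__le16 stamp, u16 counter)
{
	return (u16)(le16_to_cpu(stamp) + 1) == counter;
}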
bb2a0f7a 3720static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3721{
bb2a0f7a
YG
3722 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3723 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3724 struct net_device_stats *nstats = &bp->dev->stats;
3725
3726 nstats->rx_packets =
3727 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3728 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3729 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3730
3731 nstats->tx_packets =
3732 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3733 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3734 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3735
bb2a0f7a 3736 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
a2fbb9ea 3737
0e39e645 3738 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3739
bb2a0f7a
YG
3740 nstats->rx_dropped = old_tclient->checksum_discard +
3741 estats->mac_discard;
a2fbb9ea
ET
3742 nstats->tx_dropped = 0;
3743
3744 nstats->multicast =
3745 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3746
bb2a0f7a
YG
3747 nstats->collisions =
3748 estats->tx_stat_dot3statssinglecollisionframes_lo +
3749 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3750 estats->tx_stat_dot3statslatecollisions_lo +
3751 estats->tx_stat_dot3statsexcessivecollisions_lo;
a2fbb9ea 3752
bb2a0f7a
YG
3753 estats->jabber_packets_received =
3754 old_tclient->packets_too_big_discard +
3755 estats->rx_stat_dot3statsframestoolong_lo;
3756
3757 nstats->rx_length_errors =
3758 estats->rx_stat_etherstatsundersizepkts_lo +
3759 estats->jabber_packets_received;
66e855f3 3760 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
bb2a0f7a
YG
3761 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3762 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3763 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
a2fbb9ea
ET
3764 nstats->rx_missed_errors = estats->xxoverflow_discard;
3765
3766 nstats->rx_errors = nstats->rx_length_errors +
3767 nstats->rx_over_errors +
3768 nstats->rx_crc_errors +
3769 nstats->rx_frame_errors +
0e39e645
ET
3770 nstats->rx_fifo_errors +
3771 nstats->rx_missed_errors;
a2fbb9ea 3772
bb2a0f7a
YG
3773 nstats->tx_aborted_errors =
3774 estats->tx_stat_dot3statslatecollisions_lo +
3775 estats->tx_stat_dot3statsexcessivecollisions_lo;
3776 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
a2fbb9ea
ET
3777 nstats->tx_fifo_errors = 0;
3778 nstats->tx_heartbeat_errors = 0;
3779 nstats->tx_window_errors = 0;
3780
3781 nstats->tx_errors = nstats->tx_aborted_errors +
3782 nstats->tx_carrier_errors;
a2fbb9ea
ET
3783}
3784
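/* Illustrative sketch: the bnx2x_hilo() calls above take a pointer to
 * the _hi word of a hi/lo statistics pair (the matching _lo word is
 * laid out immediately after it) and fold the pair into the unsigned
 * long that net_device_stats expects. Assuming 64-bit longs keep the
 * full value and 32-bit builds keep only the low word, the shape is
 * roughly:
 */
static inline unsigned long example_hilo(u32 *hi_ref)
{
	u32 lo = *(hi_ref + 1);		/* _lo immediately follows _hi */
#if BITS_PER_LONG == 64
	return ((u64)(*hi_ref) << 32) | lo;
#else
	return lo;
#endif
}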
bb2a0f7a 3785static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3786{
bb2a0f7a
YG
3787 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3788 int update = 0;
a2fbb9ea 3789
bb2a0f7a
YG
3790 if (*stats_comp != DMAE_COMP_VAL)
3791 return;
3792
3793 if (bp->port.pmf)
3794 update = (bnx2x_hw_stats_update(bp) == 0);
a2fbb9ea 3795
bb2a0f7a 3796 update |= (bnx2x_storm_stats_update(bp) == 0);
a2fbb9ea 3797
bb2a0f7a
YG
3798 if (update)
3799 bnx2x_net_stats_update(bp);
a2fbb9ea 3800
bb2a0f7a
YG
3801 else {
3802 if (bp->stats_pending) {
3803 bp->stats_pending++;
3804 if (bp->stats_pending == 3) {
3805 BNX2X_ERR("storm stats were not updated for 3 times\n");
3806 bnx2x_panic();
3807 return;
3808 }
3809 }
a2fbb9ea
ET
3810 }
3811
3812 if (bp->msglevel & NETIF_MSG_TIMER) {
bb2a0f7a
YG
3813 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3814 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3815 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3816 int i;
a2fbb9ea
ET
3817
3818 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3819 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3820 " tx pkt (%lx)\n",
3821 bnx2x_tx_avail(bp->fp),
7a9b2557 3822 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
3823 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3824 " rx pkt (%lx)\n",
7a9b2557
VZ
3825 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3826 bp->fp->rx_comp_cons),
3827 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
a2fbb9ea 3828 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
6378c025 3829 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
bb2a0f7a 3830 estats->driver_xoff, estats->brb_drop_lo);
a2fbb9ea
ET
3831 printk(KERN_DEBUG "tstats: checksum_discard %u "
3832 "packets_too_big_discard %u no_buff_discard %u "
3833 "mac_discard %u mac_filter_discard %u "
3834 "xxovrflow_discard %u brb_truncate_discard %u "
3835 "ttl0_discard %u\n",
bb2a0f7a
YG
3836 old_tclient->checksum_discard,
3837 old_tclient->packets_too_big_discard,
3838 old_tclient->no_buff_discard, estats->mac_discard,
a2fbb9ea 3839 estats->mac_filter_discard, estats->xxoverflow_discard,
bb2a0f7a
YG
3840 estats->brb_truncate_discard,
3841 old_tclient->ttl0_discard);
a2fbb9ea
ET
3842
3843 for_each_queue(bp, i) {
3844 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3845 bnx2x_fp(bp, i, tx_pkt),
3846 bnx2x_fp(bp, i, rx_pkt),
3847 bnx2x_fp(bp, i, rx_calls));
3848 }
3849 }
3850
bb2a0f7a
YG
3851 bnx2x_hw_stats_post(bp);
3852 bnx2x_storm_stats_post(bp);
3853}
a2fbb9ea 3854
bb2a0f7a
YG
3855static void bnx2x_port_stats_stop(struct bnx2x *bp)
3856{
3857 struct dmae_command *dmae;
3858 u32 opcode;
3859 int loader_idx = PMF_DMAE_C(bp);
3860 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3861
bb2a0f7a 3862 bp->executer_idx = 0;
a2fbb9ea 3863
bb2a0f7a
YG
3864 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3865 DMAE_CMD_C_ENABLE |
3866 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3867#ifdef __BIG_ENDIAN
bb2a0f7a 3868 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3869#else
bb2a0f7a 3870 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3871#endif
bb2a0f7a
YG
3872 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3873 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3874
3875 if (bp->port.port_stx) {
3876
3877 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3878 if (bp->func_stx)
3879 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3880 else
3881 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3882 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3883 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3884 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3885 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3886 dmae->len = sizeof(struct host_port_stats) >> 2;
3887 if (bp->func_stx) {
3888 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3889 dmae->comp_addr_hi = 0;
3890 dmae->comp_val = 1;
3891 } else {
3892 dmae->comp_addr_lo =
3893 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3894 dmae->comp_addr_hi =
3895 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3896 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3897
bb2a0f7a
YG
3898 *stats_comp = 0;
3899 }
a2fbb9ea
ET
3900 }
3901
bb2a0f7a
YG
3902 if (bp->func_stx) {
3903
3904 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3905 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3906 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3907 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3908 dmae->dst_addr_lo = bp->func_stx >> 2;
3909 dmae->dst_addr_hi = 0;
3910 dmae->len = sizeof(struct host_func_stats) >> 2;
3911 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3912 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3913 dmae->comp_val = DMAE_COMP_VAL;
3914
3915 *stats_comp = 0;
a2fbb9ea 3916 }
bb2a0f7a
YG
3917}
3918
3919static void bnx2x_stats_stop(struct bnx2x *bp)
3920{
3921 int update = 0;
3922
3923 bnx2x_stats_comp(bp);
3924
3925 if (bp->port.pmf)
3926 update = (bnx2x_hw_stats_update(bp) == 0);
3927
3928 update |= (bnx2x_storm_stats_update(bp) == 0);
3929
3930 if (update) {
3931 bnx2x_net_stats_update(bp);
a2fbb9ea 3932
bb2a0f7a
YG
3933 if (bp->port.pmf)
3934 bnx2x_port_stats_stop(bp);
3935
3936 bnx2x_hw_stats_post(bp);
3937 bnx2x_stats_comp(bp);
a2fbb9ea
ET
3938 }
3939}
3940
bb2a0f7a
YG
3941static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3942{
3943}
3944
3945static const struct {
3946 void (*action)(struct bnx2x *bp);
3947 enum bnx2x_stats_state next_state;
3948} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3949/* state event */
3950{
3951/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3952/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3953/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3954/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3955},
3956{
3957/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3958/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3959/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3960/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3961}
3962};
3963
3964static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3965{
3966 enum bnx2x_stats_state state = bp->stats_state;
3967
3968 bnx2x_stats_stm[state][event].action(bp);
3969 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3970
3971 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3972 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3973 state, event, bp->stats_state);
3974}
3975
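/* Note on the construct above: the 2x4 bnx2x_stats_stm[][] table is a
 * classic table-driven state machine - each (state, event) cell names
 * an action plus the next state, and bnx2x_stats_handle() is the whole
 * interpreter. For example, STATS_EVENT_STOP in STATS_STATE_ENABLED
 * runs bnx2x_stats_stop() and lands in STATS_STATE_DISABLED, while the
 * same event in STATS_STATE_DISABLED runs bnx2x_stats_do_nothing().
 * Callers never dispatch on the state by hand; they only ever do e.g.:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 */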
a2fbb9ea
ET
3976static void bnx2x_timer(unsigned long data)
3977{
3978 struct bnx2x *bp = (struct bnx2x *) data;
3979
3980 if (!netif_running(bp->dev))
3981 return;
3982
3983 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3984 goto timer_restart;
a2fbb9ea
ET
3985
3986 if (poll) {
3987 struct bnx2x_fastpath *fp = &bp->fp[0];
3988 int rc;
3989
3990 bnx2x_tx_int(fp, 1000);
3991 rc = bnx2x_rx_int(fp, 1000);
3992 }
3993
34f80b04
EG
3994 if (!BP_NOMCP(bp)) {
3995 int func = BP_FUNC(bp);
a2fbb9ea
ET
3996 u32 drv_pulse;
3997 u32 mcp_pulse;
3998
3999 ++bp->fw_drv_pulse_wr_seq;
4000 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4001 /* TBD - add SYSTEM_TIME */
4002 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4003 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4004
34f80b04 4005 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
4006 MCP_PULSE_SEQ_MASK);
4007 /* The delta between driver pulse and mcp response
4008 * should be 1 (before mcp response) or 0 (after mcp response)
4009 */
4010 if ((drv_pulse != mcp_pulse) &&
4011 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4012 /* someone lost a heartbeat... */
4013 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4014 drv_pulse, mcp_pulse);
4015 }
4016 }
4017
bb2a0f7a
YG
4018 if ((bp->state == BNX2X_STATE_OPEN) ||
4019 (bp->state == BNX2X_STATE_DISABLED))
4020 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4021
f1410647 4022timer_restart:
a2fbb9ea
ET
4023 mod_timer(&bp->timer, jiffies + bp->current_interval);
4024}
4025
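/* Illustrative sketch: the heartbeat check in bnx2x_timer() tolerates
 * exactly one sequence number of skew between the driver's pulse and
 * the MCP's echo, modulo MCP_PULSE_SEQ_MASK. Condensed into a
 * hypothetical predicate:
 */
static inline int example_pulse_in_sync(u32 drv_pulse, u32 mcp_pulse)
{
	/* equal (MCP already responded) or MCP one step behind */
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK));
}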
4026/* end of Statistics */
4027
4028/* nic init */
4029
4030/*
4031 * nic init service functions
4032 */
4033
34f80b04 4034static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4035{
34f80b04
EG
4036 int port = BP_PORT(bp);
4037
4038 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4039 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4040 sizeof(struct ustorm_status_block)/4);
34f80b04
EG
4041 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4042 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4043 sizeof(struct cstorm_status_block)/4);
34f80b04
EG
4044}
4045
5c862848
EG
4046static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4047 dma_addr_t mapping, int sb_id)
34f80b04
EG
4048{
4049 int port = BP_PORT(bp);
bb2a0f7a 4050 int func = BP_FUNC(bp);
a2fbb9ea 4051 int index;
34f80b04 4052 u64 section;
a2fbb9ea
ET
4053
4054 /* USTORM */
4055 section = ((u64)mapping) + offsetof(struct host_status_block,
4056 u_status_block);
34f80b04 4057 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4058
4059 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4060 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4061 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4062 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4063 U64_HI(section));
bb2a0f7a
YG
4064 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4065 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4066
4067 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4068 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4069 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4070
4071 /* CSTORM */
4072 section = ((u64)mapping) + offsetof(struct host_status_block,
4073 c_status_block);
34f80b04 4074 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4075
4076 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4077 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4078 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4079 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4080 U64_HI(section));
7a9b2557
VZ
4081 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4082 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4083
4084 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4085 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04
EG
4086 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4087
4088 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4089}
4090
4091static void bnx2x_zero_def_sb(struct bnx2x *bp)
4092{
4093 int func = BP_FUNC(bp);
a2fbb9ea 4094
34f80b04
EG
4095 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4096 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4097 sizeof(struct ustorm_def_status_block)/4);
4098 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4099 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4100 sizeof(struct cstorm_def_status_block)/4);
4101 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4102 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4103 sizeof(struct xstorm_def_status_block)/4);
4104 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4105 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4106 sizeof(struct tstorm_def_status_block)/4);
a2fbb9ea
ET
4107}
4108
4109static void bnx2x_init_def_sb(struct bnx2x *bp,
4110 struct host_def_status_block *def_sb,
34f80b04 4111 dma_addr_t mapping, int sb_id)
a2fbb9ea 4112{
34f80b04
EG
4113 int port = BP_PORT(bp);
4114 int func = BP_FUNC(bp);
a2fbb9ea
ET
4115 int index, val, reg_offset;
4116 u64 section;
4117
4118 /* ATTN */
4119 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4120 atten_status_block);
34f80b04 4121 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4122
49d66772
ET
4123 bp->attn_state = 0;
4124
a2fbb9ea
ET
4125 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4126 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4127
34f80b04 4128 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4129 bp->attn_group[index].sig[0] = REG_RD(bp,
4130 reg_offset + 0x10*index);
4131 bp->attn_group[index].sig[1] = REG_RD(bp,
4132 reg_offset + 0x4 + 0x10*index);
4133 bp->attn_group[index].sig[2] = REG_RD(bp,
4134 reg_offset + 0x8 + 0x10*index);
4135 bp->attn_group[index].sig[3] = REG_RD(bp,
4136 reg_offset + 0xc + 0x10*index);
4137 }
4138
a2fbb9ea
ET
4139 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4140 HC_REG_ATTN_MSG0_ADDR_L);
4141
4142 REG_WR(bp, reg_offset, U64_LO(section));
4143 REG_WR(bp, reg_offset + 4, U64_HI(section));
4144
4145 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4146
4147 val = REG_RD(bp, reg_offset);
34f80b04 4148 val |= sb_id;
a2fbb9ea
ET
4149 REG_WR(bp, reg_offset, val);
4150
4151 /* USTORM */
4152 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4153 u_def_status_block);
34f80b04 4154 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4155
4156 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4157 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4158 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4159 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4160 U64_HI(section));
5c862848 4161 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4162 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4163
4164 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4165 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4166 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4167
4168 /* CSTORM */
4169 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4170 c_def_status_block);
34f80b04 4171 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4172
4173 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4174 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4175 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4176 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4177 U64_HI(section));
5c862848 4178 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4179 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4180
4181 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4182 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4183 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4184
4185 /* TSTORM */
4186 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4187 t_def_status_block);
34f80b04 4188 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4189
4190 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4191 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4192 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4193 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4194 U64_HI(section));
5c862848 4195 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4196 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4197
4198 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4199 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4200 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4201
4202 /* XSTORM */
4203 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4204 x_def_status_block);
34f80b04 4205 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4206
4207 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4208 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4209 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4210 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4211 U64_HI(section));
5c862848 4212 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4213 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4214
4215 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4216 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4217 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4218
bb2a0f7a 4219 bp->stats_pending = 0;
66e855f3 4220 bp->set_mac_pending = 0;
bb2a0f7a 4221
34f80b04 4222 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4223}
4224
4225static void bnx2x_update_coalesce(struct bnx2x *bp)
4226{
34f80b04 4227 int port = BP_PORT(bp);
a2fbb9ea
ET
4228 int i;
4229
4230 for_each_queue(bp, i) {
34f80b04 4231 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4232
4233 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4234 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4235 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4236 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4237 bp->rx_ticks/12);
a2fbb9ea 4238 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4239 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848
EG
4240 U_SB_ETH_RX_CQ_INDEX),
4241 bp->rx_ticks ? 0 : 1);
4242 REG_WR16(bp, BAR_USTRORM_INTMEM +
4243 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4244 U_SB_ETH_RX_BD_INDEX),
34f80b04 4245 bp->rx_ticks ? 0 : 1);
a2fbb9ea
ET
4246
4247 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4248 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4249 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4250 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4251 bp->tx_ticks/12);
a2fbb9ea 4252 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4253 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4254 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4255 bp->tx_ticks ? 0 : 1);
a2fbb9ea
ET
4256 }
4257}
4258
7a9b2557
VZ
4259static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4260 struct bnx2x_fastpath *fp, int last)
4261{
4262 int i;
4263
4264 for (i = 0; i < last; i++) {
4265 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4266 struct sk_buff *skb = rx_buf->skb;
4267
4268 if (skb == NULL) {
4269 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4270 continue;
4271 }
4272
4273 if (fp->tpa_state[i] == BNX2X_TPA_START)
4274 pci_unmap_single(bp->pdev,
4275 pci_unmap_addr(rx_buf, mapping),
437cf2f1 4276 bp->rx_buf_size,
7a9b2557
VZ
4277 PCI_DMA_FROMDEVICE);
4278
4279 dev_kfree_skb(skb);
4280 rx_buf->skb = NULL;
4281 }
4282}
4283
a2fbb9ea
ET
4284static void bnx2x_init_rx_rings(struct bnx2x *bp)
4285{
7a9b2557 4286 int func = BP_FUNC(bp);
32626230
EG
4287 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4288 ETH_MAX_AGGREGATION_QUEUES_E1H;
4289 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4290 int i, j;
a2fbb9ea 4291
437cf2f1
EG
4292 bp->rx_buf_size = bp->dev->mtu;
4293 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4294 BCM_RX_ETH_PAYLOAD_ALIGN;
a2fbb9ea 4295
7a9b2557
VZ
4296 if (bp->flags & TPA_ENABLE_FLAG) {
4297 DP(NETIF_MSG_IFUP,
437cf2f1
EG
4298 "rx_buf_size %d effective_mtu %d\n",
4299 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
7a9b2557
VZ
4300
4301 for_each_queue(bp, j) {
32626230 4302 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4303
32626230 4304 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4305 fp->tpa_pool[i].skb =
4306 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4307 if (!fp->tpa_pool[i].skb) {
4308 BNX2X_ERR("Failed to allocate TPA "
4309 "skb pool for queue[%d] - "
4310 "disabling TPA on this "
4311 "queue!\n", j);
4312 bnx2x_free_tpa_pool(bp, fp, i);
4313 fp->disable_tpa = 1;
4314 break;
4315 }
4316 pci_unmap_addr_set((struct sw_rx_bd *)
4317 &bp->fp->tpa_pool[i],
4318 mapping, 0);
4319 fp->tpa_state[i] = BNX2X_TPA_STOP;
4320 }
4321 }
4322 }
4323
a2fbb9ea
ET
4324 for_each_queue(bp, j) {
4325 struct bnx2x_fastpath *fp = &bp->fp[j];
4326
4327 fp->rx_bd_cons = 0;
4328 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4329 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4330
4331 /* "next page" elements initialization */
4332 /* SGE ring */
4333 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4334 struct eth_rx_sge *sge;
4335
4336 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4337 sge->addr_hi =
4338 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4339 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4340 sge->addr_lo =
4341 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4342 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4343 }
4344
4345 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4346
7a9b2557 4347 /* RX BD ring */
a2fbb9ea
ET
4348 for (i = 1; i <= NUM_RX_RINGS; i++) {
4349 struct eth_rx_bd *rx_bd;
4350
4351 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4352 rx_bd->addr_hi =
4353 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4354 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4355 rx_bd->addr_lo =
4356 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4357 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4358 }
4359
34f80b04 4360 /* CQ ring */
a2fbb9ea
ET
4361 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4362 struct eth_rx_cqe_next_page *nextpg;
4363
4364 nextpg = (struct eth_rx_cqe_next_page *)
4365 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4366 nextpg->addr_hi =
4367 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4368 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4369 nextpg->addr_lo =
4370 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4371 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4372 }
4373
7a9b2557
VZ
4374 /* Allocate SGEs and initialize the ring elements */
4375 for (i = 0, ring_prod = 0;
4376 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4377
7a9b2557
VZ
4378 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4379 BNX2X_ERR("was only able to allocate "
4380 "%d rx sges\n", i);
4381 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4382 /* Cleanup already allocated elements */
4383 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4384 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4385 fp->disable_tpa = 1;
4386 ring_prod = 0;
4387 break;
4388 }
4389 ring_prod = NEXT_SGE_IDX(ring_prod);
4390 }
4391 fp->rx_sge_prod = ring_prod;
4392
4393 /* Allocate BDs and initialize BD ring */
66e855f3 4394 fp->rx_comp_cons = 0;
7a9b2557 4395 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4396 for (i = 0; i < bp->rx_ring_size; i++) {
4397 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4398 BNX2X_ERR("was only able to allocate "
4399 "%d rx skbs\n", i);
66e855f3 4400 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
4401 break;
4402 }
4403 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4404 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4405 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
4406 }
4407
7a9b2557
VZ
4408 fp->rx_bd_prod = ring_prod;
4409 /* must not have more available CQEs than BDs */
4410 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4411 cqe_ring_prod);
a2fbb9ea
ET
4412 fp->rx_pkt = fp->rx_calls = 0;
4413
7a9b2557
VZ
4414 /* Warning!
4415 * updating the producers will generate an interrupt (to the TSTORM),
4416 * so it must only be done after the chip is initialized
4417 */
4418 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4419 fp->rx_sge_prod);
a2fbb9ea
ET
4420 if (j != 0)
4421 continue;
4422
4423 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4424 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
4425 U64_LO(fp->rx_comp_mapping));
4426 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4427 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
4428 U64_HI(fp->rx_comp_mapping));
4429 }
4430}
4431
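/* Illustrative sketch: each ring initialized above is a chain of
 * DMA-able pages. The last slots of every page hold not a real
 * descriptor but a "next page" pointer, and the (i % NUM_..._RINGS)
 * arithmetic makes the final page link back to the first, so the
 * hardware walks the ring endlessly without driver help. For one page
 * of the RX BD ring, with i counted 1..NUM_RX_RINGS as in the loop
 * above, the linking step is essentially (hypothetical helper; the
 * link occupies the last two BD-sized slots here):
 */
static void example_link_rx_page(struct bnx2x_fastpath *fp, int i)
{
	struct eth_rx_bd *link = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
	dma_addr_t next = fp->rx_desc_mapping +
			  BCM_PAGE_SIZE * (i % NUM_RX_RINGS);

	link->addr_hi = cpu_to_le32(U64_HI(next));
	link->addr_lo = cpu_to_le32(U64_LO(next));
}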
4432static void bnx2x_init_tx_ring(struct bnx2x *bp)
4433{
4434 int i, j;
4435
4436 for_each_queue(bp, j) {
4437 struct bnx2x_fastpath *fp = &bp->fp[j];
4438
4439 for (i = 1; i <= NUM_TX_RINGS; i++) {
4440 struct eth_tx_bd *tx_bd =
4441 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4442
4443 tx_bd->addr_hi =
4444 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4445 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4446 tx_bd->addr_lo =
4447 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4448 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4449 }
4450
4451 fp->tx_pkt_prod = 0;
4452 fp->tx_pkt_cons = 0;
4453 fp->tx_bd_prod = 0;
4454 fp->tx_bd_cons = 0;
4455 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4456 fp->tx_pkt = 0;
4457 }
4458}
4459
4460static void bnx2x_init_sp_ring(struct bnx2x *bp)
4461{
34f80b04 4462 int func = BP_FUNC(bp);
a2fbb9ea
ET
4463
4464 spin_lock_init(&bp->spq_lock);
4465
4466 bp->spq_left = MAX_SPQ_PENDING;
4467 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4468 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4469 bp->spq_prod_bd = bp->spq;
4470 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4471
34f80b04 4472 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4473 U64_LO(bp->spq_mapping));
34f80b04
EG
4474 REG_WR(bp,
4475 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4476 U64_HI(bp->spq_mapping));
4477
34f80b04 4478 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4479 bp->spq_prod_idx);
4480}
4481
4482static void bnx2x_init_context(struct bnx2x *bp)
4483{
4484 int i;
4485
4486 for_each_queue(bp, i) {
4487 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4488 struct bnx2x_fastpath *fp = &bp->fp[i];
34f80b04 4489 u8 sb_id = FP_SB_ID(fp);
a2fbb9ea
ET
4490
4491 context->xstorm_st_context.tx_bd_page_base_hi =
4492 U64_HI(fp->tx_desc_mapping);
4493 context->xstorm_st_context.tx_bd_page_base_lo =
4494 U64_LO(fp->tx_desc_mapping);
4495 context->xstorm_st_context.db_data_addr_hi =
4496 U64_HI(fp->tx_prods_mapping);
4497 context->xstorm_st_context.db_data_addr_lo =
4498 U64_LO(fp->tx_prods_mapping);
34f80b04
EG
4499 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4500 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4501
4502 context->ustorm_st_context.common.sb_index_numbers =
4503 BNX2X_RX_SB_INDEX_NUM;
4504 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4505 context->ustorm_st_context.common.status_block_id = sb_id;
4506 context->ustorm_st_context.common.flags =
4507 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
437cf2f1
EG
4508 context->ustorm_st_context.common.mc_alignment_size =
4509 BCM_RX_ETH_PAYLOAD_ALIGN;
34f80b04 4510 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4511 bp->rx_buf_size;
34f80b04 4512 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4513 U64_HI(fp->rx_desc_mapping);
34f80b04 4514 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4515 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4516 if (!fp->disable_tpa) {
4517 context->ustorm_st_context.common.flags |=
4518 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4519 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4520 context->ustorm_st_context.common.sge_buff_size =
4521 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4522 context->ustorm_st_context.common.sge_page_base_hi =
4523 U64_HI(fp->rx_sge_mapping);
4524 context->ustorm_st_context.common.sge_page_base_lo =
4525 U64_LO(fp->rx_sge_mapping);
4526 }
4527
a2fbb9ea 4528 context->cstorm_st_context.sb_index_number =
5c862848 4529 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4530 context->cstorm_st_context.status_block_id = sb_id;
a2fbb9ea
ET
4531
4532 context->xstorm_ag_context.cdu_reserved =
4533 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4534 CDU_REGION_NUMBER_XCM_AG,
4535 ETH_CONNECTION_TYPE);
4536 context->ustorm_ag_context.cdu_usage =
4537 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4538 CDU_REGION_NUMBER_UCM_AG,
4539 ETH_CONNECTION_TYPE);
4540 }
4541}
4542
4543static void bnx2x_init_ind_table(struct bnx2x *bp)
4544{
26c8fa4d 4545 int func = BP_FUNC(bp);
a2fbb9ea
ET
4546 int i;
4547
4548 if (!is_multi(bp))
4549 return;
4550
34f80b04 4551 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
a2fbb9ea 4552 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4553 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d
EG
4554 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4555 BP_CL_ID(bp) + (i % bp->num_queues));
a2fbb9ea
ET
4556}
4557
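/* Note on the loop above: it spreads RSS hash results over the active
 * queues round-robin - entry i of the TSTORM_INDIRECTION_TABLE_SIZE
 * entry table maps to client BP_CL_ID(bp) + (i % bp->num_queues).
 * E.g. with 4 queues and base client id 0 the table reads
 * 0,1,2,3,0,1,2,3,... so every hash bucket lands on a queue and the
 * load stays even.
 */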
49d66772
ET
4558static void bnx2x_set_client_config(struct bnx2x *bp)
4559{
49d66772 4560 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4561 int port = BP_PORT(bp);
4562 int i;
49d66772 4563
e7799c5f 4564 tstorm_client.mtu = bp->dev->mtu;
66e855f3 4565 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
49d66772
ET
4566 tstorm_client.config_flags =
4567 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4568#ifdef BCM_VLAN
0c6671b0 4569 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772
ET
4570 tstorm_client.config_flags |=
4571 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4572 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4573 }
4574#endif
49d66772 4575
7a9b2557
VZ
4576 if (bp->flags & TPA_ENABLE_FLAG) {
4577 tstorm_client.max_sges_for_packet =
4f40f2cb 4578 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
4579 tstorm_client.max_sges_for_packet =
4580 ((tstorm_client.max_sges_for_packet +
4581 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4582 PAGES_PER_SGE_SHIFT;
4583
4584 tstorm_client.config_flags |=
4585 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4586 }
4587
49d66772
ET
4588 for_each_queue(bp, i) {
4589 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4590 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4591 ((u32 *)&tstorm_client)[0]);
4592 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4593 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4594 ((u32 *)&tstorm_client)[1]);
4595 }
4596
34f80b04
EG
4597 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4598 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4599}
4600
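/* Illustrative sketch of the max_sges_for_packet arithmetic above:
 * first a ceiling division of the MTU by the SGE page size, then a
 * round-up to a whole number of PAGES_PER_SGE, expressed in SGE
 * entries. As a hypothetical helper:
 */
static inline u32 example_max_sges_for_mtu(u32 mtu)
{
	u32 pages = SGE_PAGE_ALIGN(mtu) >> SGE_PAGE_SHIFT; /* ceil div */

	/* round up to a multiple of PAGES_PER_SGE, count in SGE units */
	return ((pages + PAGES_PER_SGE - 1) & ~(PAGES_PER_SGE - 1)) >>
	       PAGES_PER_SGE_SHIFT;
}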
a2fbb9ea
ET
4601static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4602{
a2fbb9ea 4603 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4604 int mode = bp->rx_mode;
4605 int mask = (1 << BP_L_ID(bp));
4606 int func = BP_FUNC(bp);
a2fbb9ea
ET
4607 int i;
4608
3196a88a 4609 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
4610
4611 switch (mode) {
4612 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4613 tstorm_mac_filter.ucast_drop_all = mask;
4614 tstorm_mac_filter.mcast_drop_all = mask;
4615 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea
ET
4616 break;
4617 case BNX2X_RX_MODE_NORMAL:
34f80b04 4618 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4619 break;
4620 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4621 tstorm_mac_filter.mcast_accept_all = mask;
4622 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4623 break;
4624 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4625 tstorm_mac_filter.ucast_accept_all = mask;
4626 tstorm_mac_filter.mcast_accept_all = mask;
4627 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4628 break;
4629 default:
34f80b04
EG
4630 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4631 break;
a2fbb9ea
ET
4632 }
4633
4634 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4635 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4636 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4637 ((u32 *)&tstorm_mac_filter)[i]);
4638
34f80b04 4639/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4640 ((u32 *)&tstorm_mac_filter)[i]); */
4641 }
a2fbb9ea 4642
49d66772
ET
4643 if (mode != BNX2X_RX_MODE_NONE)
4644 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4645}
4646
471de716
EG
4647static void bnx2x_init_internal_common(struct bnx2x *bp)
4648{
4649 int i;
4650
3cdf1db7
YG
4651 if (bp->flags & TPA_ENABLE_FLAG) {
4652 struct tstorm_eth_tpa_exist tpa = {0};
4653
4654 tpa.tpa_exist = 1;
4655
4656 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4657 ((u32 *)&tpa)[0]);
4658 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4659 ((u32 *)&tpa)[1]);
4660 }
4661
471de716
EG
4662 /* Zero this manually as its initialization is
4663 currently missing in the initTool */
4664 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4665 REG_WR(bp, BAR_USTRORM_INTMEM +
4666 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4667}
4668
4669static void bnx2x_init_internal_port(struct bnx2x *bp)
4670{
4671 int port = BP_PORT(bp);
4672
4673 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4674 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4675 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4676 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4677}
4678
4679static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4680{
a2fbb9ea
ET
4681 struct tstorm_eth_function_common_config tstorm_config = {0};
4682 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4683 int port = BP_PORT(bp);
4684 int func = BP_FUNC(bp);
4685 int i;
471de716 4686 u16 max_agg_size;
a2fbb9ea
ET
4687
4688 if (is_multi(bp)) {
4689 tstorm_config.config_flags = MULTI_FLAGS;
4690 tstorm_config.rss_result_mask = MULTI_MASK;
4691 }
4692
34f80b04
EG
4693 tstorm_config.leading_client_id = BP_L_ID(bp);
4694
a2fbb9ea 4695 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4696 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4697 (*(u32 *)&tstorm_config));
4698
c14423fe 4699 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4700 bnx2x_set_storm_rx_mode(bp);
4701
66e855f3
YG
4702 /* reset xstorm per client statistics */
4703 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4704 REG_WR(bp, BAR_XSTRORM_INTMEM +
4705 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4706 i*4, 0);
4707 }
4708 /* reset tstorm per client statistics */
4709 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4710 REG_WR(bp, BAR_TSTRORM_INTMEM +
4711 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4712 i*4, 0);
4713 }
4714
4715 /* Init statistics related context */
34f80b04 4716 stats_flags.collect_eth = 1;
a2fbb9ea 4717
66e855f3 4718 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4719 ((u32 *)&stats_flags)[0]);
66e855f3 4720 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4721 ((u32 *)&stats_flags)[1]);
4722
66e855f3 4723 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4724 ((u32 *)&stats_flags)[0]);
66e855f3 4725 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4726 ((u32 *)&stats_flags)[1]);
4727
66e855f3 4728 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4729 ((u32 *)&stats_flags)[0]);
66e855f3 4730 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4731 ((u32 *)&stats_flags)[1]);
4732
66e855f3
YG
4733 REG_WR(bp, BAR_XSTRORM_INTMEM +
4734 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4735 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4736 REG_WR(bp, BAR_XSTRORM_INTMEM +
4737 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4738 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4739
4740 REG_WR(bp, BAR_TSTRORM_INTMEM +
4741 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4742 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4743 REG_WR(bp, BAR_TSTRORM_INTMEM +
4744 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4745 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04
EG
4746
4747 if (CHIP_IS_E1H(bp)) {
4748 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4749 IS_E1HMF(bp));
4750 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4751 IS_E1HMF(bp));
4752 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4753 IS_E1HMF(bp));
4754 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4755 IS_E1HMF(bp));
4756
7a9b2557
VZ
4757 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4758 bp->e1hov);
34f80b04
EG
4759 }
4760
4f40f2cb
EG
4761 /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
4762 max_agg_size =
4763 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4764 SGE_PAGE_SIZE * PAGES_PER_SGE),
4765 (u32)0xffff);
7a9b2557
VZ
4766 for_each_queue(bp, i) {
4767 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
4768
4769 REG_WR(bp, BAR_USTRORM_INTMEM +
4770 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4771 U64_LO(fp->rx_comp_mapping));
4772 REG_WR(bp, BAR_USTRORM_INTMEM +
4773 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4774 U64_HI(fp->rx_comp_mapping));
4775
7a9b2557
VZ
4776 REG_WR16(bp, BAR_USTRORM_INTMEM +
4777 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4778 max_agg_size);
4779 }
a2fbb9ea
ET
4780}
4781
471de716
EG
4782static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4783{
4784 switch (load_code) {
4785 case FW_MSG_CODE_DRV_LOAD_COMMON:
4786 bnx2x_init_internal_common(bp);
4787 /* no break */
4788
4789 case FW_MSG_CODE_DRV_LOAD_PORT:
4790 bnx2x_init_internal_port(bp);
4791 /* no break */
4792
4793 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4794 bnx2x_init_internal_func(bp);
4795 break;
4796
4797 default:
4798 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4799 break;
4800 }
4801}
4802
4803static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4804{
4805 int i;
4806
4807 for_each_queue(bp, i) {
4808 struct bnx2x_fastpath *fp = &bp->fp[i];
4809
34f80b04 4810 fp->bp = bp;
a2fbb9ea 4811 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4812 fp->index = i;
34f80b04
EG
4813 fp->cl_id = BP_L_ID(bp) + i;
4814 fp->sb_id = fp->cl_id;
4815 DP(NETIF_MSG_IFUP,
4816 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4817 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5c862848
EG
4818 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4819 FP_SB_ID(fp));
4820 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
4821 }
4822
5c862848
EG
4823 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4824 DEF_SB_ID);
4825 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
4826 bnx2x_update_coalesce(bp);
4827 bnx2x_init_rx_rings(bp);
4828 bnx2x_init_tx_ring(bp);
4829 bnx2x_init_sp_ring(bp);
4830 bnx2x_init_context(bp);
471de716 4831 bnx2x_init_internal(bp, load_code);
a2fbb9ea 4832 bnx2x_init_ind_table(bp);
0ef00459
EG
4833 bnx2x_stats_init(bp);
4834
4835 /* At this point, we are ready for interrupts */
4836 atomic_set(&bp->intr_sem, 0);
4837
4838 /* flush all before enabling interrupts */
4839 mb();
4840 mmiowb();
4841
615f8fd9 4842 bnx2x_int_enable(bp);
a2fbb9ea
ET
4843}
4844
4845/* end of nic init */
4846
4847/*
4848 * gzip service functions
4849 */
4850
4851static int bnx2x_gunzip_init(struct bnx2x *bp)
4852{
4853 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4854 &bp->gunzip_mapping);
4855 if (bp->gunzip_buf == NULL)
4856 goto gunzip_nomem1;
4857
4858 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4859 if (bp->strm == NULL)
4860 goto gunzip_nomem2;
4861
4862 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4863 GFP_KERNEL);
4864 if (bp->strm->workspace == NULL)
4865 goto gunzip_nomem3;
4866
4867 return 0;
4868
4869gunzip_nomem3:
4870 kfree(bp->strm);
4871 bp->strm = NULL;
4872
4873gunzip_nomem2:
4874 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4875 bp->gunzip_mapping);
4876 bp->gunzip_buf = NULL;
4877
4878gunzip_nomem1:
4879 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 4880 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
4881 return -ENOMEM;
4882}
4883
4884static void bnx2x_gunzip_end(struct bnx2x *bp)
4885{
4886 kfree(bp->strm->workspace);
4887
4888 kfree(bp->strm);
4889 bp->strm = NULL;
4890
4891 if (bp->gunzip_buf) {
4892 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4893 bp->gunzip_mapping);
4894 bp->gunzip_buf = NULL;
4895 }
4896}
4897
4898static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4899{
4900 int n, rc;
4901
4902 /* check gzip header */
4903 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4904 return -EINVAL;
4905
4906 n = 10;
4907
34f80b04 4908#define FNAME 0x8
a2fbb9ea
ET
4909
4910 if (zbuf[3] & FNAME)
4911 while ((n < len) && (zbuf[n++] != 0)); /* bounds check before read */
4912
4913 bp->strm->next_in = zbuf + n;
4914 bp->strm->avail_in = len - n;
4915 bp->strm->next_out = bp->gunzip_buf;
4916 bp->strm->avail_out = FW_BUF_SIZE;
4917
4918 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4919 if (rc != Z_OK)
4920 return rc;
4921
4922 rc = zlib_inflate(bp->strm, Z_FINISH);
4923 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4924 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4925 bp->dev->name, bp->strm->msg);
4926
4927 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4928 if (bp->gunzip_outlen & 0x3)
4929 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4930 " gunzip_outlen (%d) not aligned\n",
4931 bp->dev->name, bp->gunzip_outlen);
4932 bp->gunzip_outlen >>= 2;
4933
4934 zlib_inflateEnd(bp->strm);
4935
4936 if (rc == Z_STREAM_END)
4937 return 0;
4938
4939 return rc;
4940}
4941
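/* Illustrative sketch: bnx2x_gunzip() above hand-parses just enough of
 * the gzip header (RFC 1952) to find the deflate payload: bytes 0-1
 * are the 0x1f 0x8b magic, byte 2 the method (Z_DEFLATED), byte 3 the
 * flags; the fixed part is 10 bytes, and if FNAME is set a
 * NUL-terminated file name follows. zlib_inflateInit2(-MAX_WBITS) then
 * decodes the raw deflate stream with no zlib/gzip wrapper. The header
 * skip in isolation (hypothetical helper, returns the payload offset
 * or -1):
 */
static int example_gzip_payload_offset(const u8 *zbuf, int len)
{
	int n = 10;			/* fixed gzip header length */

	if (len < n || zbuf[0] != 0x1f || zbuf[1] != 0x8b ||
	    zbuf[2] != Z_DEFLATED)
		return -1;
	if (zbuf[3] & FNAME)		/* skip NUL-terminated name */
		while ((n < len) && (zbuf[n++] != 0))
			;
	return n;
}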
4942/* nic load/unload */
4943
4944/*
34f80b04 4945 * General service functions
a2fbb9ea
ET
4946 */
4947
4948/* send a NIG loopback debug packet */
4949static void bnx2x_lb_pckt(struct bnx2x *bp)
4950{
a2fbb9ea 4951 u32 wb_write[3];
a2fbb9ea
ET
4952
4953 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4954 wb_write[0] = 0x55555555;
4955 wb_write[1] = 0x55555555;
34f80b04 4956 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4957 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4958
4959 /* NON-IP protocol */
a2fbb9ea
ET
4960 wb_write[0] = 0x09000000;
4961 wb_write[1] = 0x55555555;
34f80b04 4962 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4963 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4964}
4965
4966/* Some of the internal memories
4967 * are not directly readable from the driver,
4968 * so to test them we send debug packets
4969 */
4970static int bnx2x_int_mem_test(struct bnx2x *bp)
4971{
4972 int factor;
4973 int count, i;
4974 u32 val = 0;
4975
ad8d3948 4976 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4977 factor = 120;
ad8d3948
EG
4978 else if (CHIP_REV_IS_EMUL(bp))
4979 factor = 200;
4980 else
a2fbb9ea 4981 factor = 1;
a2fbb9ea
ET
4982
4983 DP(NETIF_MSG_HW, "start part1\n");
4984
4985 /* Disable inputs of parser neighbor blocks */
4986 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4987 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4988 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4989 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4990
4991 /* Write 0 to parser credits for CFC search request */
4992 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4993
4994 /* send Ethernet packet */
4995 bnx2x_lb_pckt(bp);
4996
4997 /* TODO: do we need to reset the NIG statistics here? */
4998 /* Wait until NIG register shows 1 packet of size 0x10 */
4999 count = 1000 * factor;
5000 while (count) {
34f80b04 5001
a2fbb9ea
ET
5002 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5003 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5004 if (val == 0x10)
5005 break;
5006
5007 msleep(10);
5008 count--;
5009 }
5010 if (val != 0x10) {
5011 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5012 return -1;
5013 }
5014
5015 /* Wait until PRS register shows 1 packet */
5016 count = 1000 * factor;
5017 while (count) {
5018 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5019 if (val == 1)
5020 break;
5021
5022 msleep(10);
5023 count--;
5024 }
5025 if (val != 0x1) {
5026 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5027 return -2;
5028 }
5029
5030 /* Reset and init BRB, PRS */
34f80b04 5031 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5032 msleep(50);
34f80b04 5033 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
5034 msleep(50);
5035 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5036 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5037
5038 DP(NETIF_MSG_HW, "part2\n");
5039
5040 /* Disable inputs of parser neighbor blocks */
5041 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5042 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5043 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5044 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5045
5046 /* Write 0 to parser credits for CFC search request */
5047 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5048
5049 /* send 10 Ethernet packets */
5050 for (i = 0; i < 10; i++)
5051 bnx2x_lb_pckt(bp);
5052
5053 /* Wait until NIG register shows 10 + 1
5054 packets of size 11*0x10 = 0xb0 */
5055 count = 1000 * factor;
5056 while (count) {
34f80b04 5057
a2fbb9ea
ET
5058 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5059 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5060 if (val == 0xb0)
5061 break;
5062
5063 msleep(10);
5064 count--;
5065 }
5066 if (val != 0xb0) {
5067 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5068 return -3;
5069 }
5070
5071 /* Wait until PRS register shows 2 packets */
5072 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5073 if (val != 2)
5074 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5075
5076 /* Write 1 to parser credits for CFC search request */
5077 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5078
5079 /* Wait until PRS register shows 3 packets */
5080 msleep(10 * factor);
5081 /* Wait until NIG register shows 1 packet of size 0x10 */
5082 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5083 if (val != 3)
5084 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5085
5086 /* clear NIG EOP FIFO */
5087 for (i = 0; i < 11; i++)
5088 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5089 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5090 if (val != 1) {
5091 BNX2X_ERR("clear of NIG failed\n");
5092 return -4;
5093 }
5094
5095 /* Reset and init BRB, PRS, NIG */
5096 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5097 msleep(50);
5098 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5099 msleep(50);
5100 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5101 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5102#ifndef BCM_ISCSI
5103 /* set NIC mode */
5104 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5105#endif
5106
5107 /* Enable inputs of parser neighbor blocks */
5108 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5109 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5110 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5111 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5112
5113 DP(NETIF_MSG_HW, "done\n");
5114
5115 return 0; /* OK */
5116}
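
/* Part 2 of the test above deliberately starves the parser of CFC
 * search credits so that the injected packets back up, then grants a
 * single credit and checks that exactly one more packet drains through
 * the parser, a sanity check of the credit logic itself.
 */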

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	if (CHIP_REV_IS_SLOW(bp)) {
		/* fix for emulation and FPGA for no pause */
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
	}

	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	if (CHIP_IS_E1H(bp))
		bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			     func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */
	/* Port BRB1 comes here */
	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			     port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			     port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			     port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			     port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			     port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			     port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			     port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			     port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		u32 wsum;
		struct cmng_struct_per_port m_cmng_port;
		int vn;

		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* Init RATE SHAPING and FAIRNESS contexts.
		   Initialize as if there is 10G link. */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, 10000, &m_cmng_port);
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}
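
/* The PBF threshold and init credit above are in 16-byte units, sized
 * for a 9000-byte MTU plus header overhead (9040 bytes); the extra
 * "+ 553 - 22" credit adjustment is taken as-is from the hardware
 * programming recipe and is not derived here.
 */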

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has a 1 (valid bit)
   added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0
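
/* Worked example (illustrative only): for a DMA address of
 * 0x0000001234567000, ONCHIP_ADDR1() yields 0x01234567 (address bits
 * 12-43) and ONCHIP_ADDR2() yields 0x00100000, i.e. the valid bit at
 * position 20 of the high dword with address bits 44 and up all zero.
 */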

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	if (CHIP_IS_E1H(bp))
		REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	} else
		bp->func_stx = 0;

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
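
/* Note the deliberate fall-through in the switch above (the "no break"
 * comments): a COMMON load also runs the PORT and FUNCTION stages, and
 * a PORT load also runs the FUNCTION stage, so each load_code implies
 * every narrower level of initialization.
 */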

/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
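
/* Typical usage, as in bnx2x_nic_load() below: the caller sends a
 * request such as DRV_MSG_CODE_LOAD_REQ and treats a return value of 0
 * as "no reply from the MCP". The sequence number folded into the
 * mailbox header is what pairs each reply with its request.
 */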

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {

		/* Status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* Status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	  (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
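
/* The alloc/free pair above is deliberately symmetric: any allocation
 * failure jumps to alloc_mem_err, and bnx2x_free_mem() is safe to call
 * on a partially allocated state because both free macros check the
 * pointer for NULL (and NULL it again) before releasing anything.
 */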

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
			BNX2X_ERR("IRQ of fp #%d being freed while "
				  "state != closed\n", i);

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset;

	bp->msix_table[0].entry = 0;
	offset = 1;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");

	for_each_queue(bp, i) {
		int igu_vec = offset + i + BP_L_ID(bp);

		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     bp->num_queues + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
a2fbb9ea
ET
6082static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6083{
34f80b04 6084 int i, rc, offset = 1;
a2fbb9ea 6085
a2fbb9ea
ET
6086 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6087 bp->dev->name, bp->dev);
a2fbb9ea
ET
6088 if (rc) {
6089 BNX2X_ERR("request sp irq failed\n");
6090 return -EBUSY;
6091 }
6092
6093 for_each_queue(bp, i) {
34f80b04 6094 rc = request_irq(bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6095 bnx2x_msix_fp_int, 0,
6096 bp->dev->name, &bp->fp[i]);
a2fbb9ea 6097 if (rc) {
3196a88a
EG
6098 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6099 i + offset, -rc);
a2fbb9ea
ET
6100 bnx2x_free_msix_irqs(bp);
6101 return -EBUSY;
6102 }
6103
6104 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6105 }
6106
6107 return 0;
a2fbb9ea
ET
6108}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	int rc;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			if (bp->state == BNX2X_STATE_OPEN)
				netif_wake_queue(bp->dev);
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (netif_running(bp->dev)) {
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
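
/* Shutdown ordering in bnx2x_netif_stop(): interrupts are disabled and
 * synchronized before NAPI is disabled, so no new poll can be scheduled
 * once napi_disable() returns; only then is the tx queue stopped.
 * bnx2x_netif_start() brings things back in roughly the reverse order.
 */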

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length_6b = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
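
/* Illustrative example: for dev_addr 00:11:22:33:44:55 on a
 * little-endian host, the swab16() conversions above load the CAM entry
 * as msb=0x0011, middle=0x2233, lsb=0x4455, i.e. each 16-bit word of
 * the MAC address ends up in network byte order.
 */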

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length_6b = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 500;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state)
			return 0;

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
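
/* The setup helpers below follow a common pattern: post a ramrod on the
 * slowpath queue, then let bnx2x_wait_ramrod() spin (up to roughly half
 * a second in 1 ms steps) until bnx2x_sp_event() advances the tracked
 * state variable to the expected value.
 */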

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bp->fp[index].state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(bp->fp[index].state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	if (use_inta) {
		bp->num_queues = 1;

	} else {
		if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
			/* user requested number */
			bp->num_queues = use_multi;

		else if (use_multi)
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BP_MAX_QUEUES(bp));
		else
			bp->num_queues = 1;

		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			bp->num_queues = 1;
			if (use_multi)
				BNX2X_ERR("Multi requested but failed"
					  " to enable MSI-X\n");
		}
	}

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
		printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name);
	} else {
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			goto load_error1;
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!!  mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_wake_queue(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_start_queue(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	/* TBD we really need to reset the chip
	   if we want to recover from this */
	return rc;
}
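
/* When no bootcode is present (BP_NOMCP), the driver keeps its own
 * load_count[] bookkeeping: index 0 counts loads device-wide and
 * indices 1/2 count per-port loads, so the first load overall picks
 * LOAD_COMMON, the first load on a port picks LOAD_PORT, and anything
 * else picks LOAD_FUNCTION. bnx2x_nic_unload() decrements the same
 * counters to choose the matching UNLOAD_* scope.
 */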

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	int rc;

	/* halt the connection */
	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(bp->fp[index].state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(bp->fp[index].state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	u16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#else
			rc = -EBUSY;
#endif
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
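
/* The UNLOAD_* cases mirror the staged LOAD_* initialization in
 * bnx2x_init_hw(): a wider reset code also performs the narrower
 * resets, so UNLOAD_COMMON tears down the port and function state
 * before resetting the common blocks.
 */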
6710
33471629 6711/* must be called with rtnl_lock */
34f80b04 6712static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6713{
da5a662a 6714 int port = BP_PORT(bp);
a2fbb9ea 6715 u32 reset_code = 0;
da5a662a 6716 int i, cnt, rc;
a2fbb9ea
ET
6717
6718 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6719
228241eb
ET
6720 bp->rx_mode = BNX2X_RX_MODE_NONE;
6721 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6722
f8ef6e44 6723 bnx2x_netif_stop(bp, 1);
e94d8af3 6724
34f80b04
EG
6725 del_timer_sync(&bp->timer);
6726 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6727 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6728 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6729
70b9986c
EG
6730 /* Release IRQs */
6731 bnx2x_free_irq(bp);
6732
da5a662a 6733 /* Wait until tx fast path tasks complete */
228241eb
ET
6734 for_each_queue(bp, i) {
6735 struct bnx2x_fastpath *fp = &bp->fp[i];
6736
34f80b04
EG
6737 cnt = 1000;
6738 smp_rmb();
e8b5fc51 6739 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 6740
65abd74d 6741 bnx2x_tx_int(fp, 1000);
34f80b04
EG
6742 if (!cnt) {
6743 BNX2X_ERR("timeout waiting for queue[%d]\n",
6744 i);
6745#ifdef BNX2X_STOP_ON_ERROR
6746 bnx2x_panic();
6747 return -EBUSY;
6748#else
6749 break;
6750#endif
6751 }
6752 cnt--;
da5a662a 6753 msleep(1);
34f80b04
EG
6754 smp_rmb();
6755 }
228241eb 6756 }
da5a662a
VZ
6757 /* Give HW time to discard old tx messages */
6758 msleep(1);
a2fbb9ea 6759
3101c2bc
YG
6760 if (CHIP_IS_E1(bp)) {
6761 struct mac_configuration_cmd *config =
6762 bnx2x_sp(bp, mcast_config);
6763
6764 bnx2x_set_mac_addr_e1(bp, 0);
6765
6766 for (i = 0; i < config->hdr.length_6b; i++)
6767 CAM_INVALIDATE(config->config_table[i]);
6768
6769 config->hdr.length_6b = i;
6770 if (CHIP_REV_IS_SLOW(bp))
6771 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6772 else
6773 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6774 config->hdr.client_id = BP_CL_ID(bp);
6775 config->hdr.reserved1 = 0;
6776
6777 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6778 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6779 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6780
6781 } else { /* E1H */
6782 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6783
6784 bnx2x_set_mac_addr_e1h(bp, 0);
6785
6786 for (i = 0; i < MC_HASH_SIZE; i++)
6787 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6788 }
6789
6790 if (unload_mode == UNLOAD_NORMAL)
6791 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6792
6793 else if (bp->flags & NO_WOL_FLAG) {
6794 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6795 if (CHIP_IS_E1H(bp))
6796 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6797
6798 } else if (bp->wol) {
6799 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6800 u8 *mac_addr = bp->dev->dev_addr;
6801 u32 val;
6802 /* The mac address is written to entries 1-4 to
6803 preserve entry 0 which is used by the PMF */
6804 u8 entry = (BP_E1HVN(bp) + 1)*8;
6805
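/* Each MAC_MATCH entry is 8 bytes wide: bytes 0-1 of the MAC go into
 * the first 32-bit word, bytes 2-5 into the second.
 */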
6806 val = (mac_addr[0] << 8) | mac_addr[1];
6807 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6808
6809 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6810 (mac_addr[4] << 8) | mac_addr[5];
6811 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6812
6813 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6814
6815 } else
6816 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6817
 6818 /* Close the multi and leading connections;
 6819 completions for the ramrods are collected synchronously */
6820 for_each_nondefault_queue(bp, i)
6821 if (bnx2x_stop_multi(bp, i))
228241eb 6822 goto unload_error;
a2fbb9ea 6823
6824 rc = bnx2x_stop_leading(bp);
6825 if (rc) {
34f80b04 6826 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6827#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6828 return -EBUSY;
6829#else
6830 goto unload_error;
34f80b04 6831#endif
6832 }
6833
6834unload_error:
34f80b04 6835 if (!BP_NOMCP(bp))
228241eb 6836 reset_code = bnx2x_fw_command(bp, reset_code);
6837 else {
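 /* No MCP: emulate its bookkeeping with the driver's own common and
 * per-port load counts to derive the reset code it would have returned.
 */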
6838 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6839 load_count[0], load_count[1], load_count[2]);
6840 load_count[0]--;
da5a662a 6841 load_count[1 + port]--;
6842 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6843 load_count[0], load_count[1], load_count[2]);
6844 if (load_count[0] == 0)
6845 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6846 else if (load_count[1 + port] == 0)
6847 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6848 else
6849 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6850 }
a2fbb9ea 6851
6852 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6853 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6854 bnx2x__link_reset(bp);
6855
6856 /* Reset the chip */
228241eb 6857 bnx2x_reset_chip(bp, reset_code);
6858
6859 /* Report UNLOAD_DONE to MCP */
34f80b04 6860 if (!BP_NOMCP(bp))
a2fbb9ea 6861 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9a035440 6862 bp->port.pmf = 0;
a2fbb9ea 6863
7a9b2557 6864 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6865 bnx2x_free_skbs(bp);
7a9b2557 6866 for_each_queue(bp, i)
3196a88a 6867 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6868 for_each_queue(bp, i)
6869 netif_napi_del(&bnx2x_fp(bp, i, napi));
6870 bnx2x_free_mem(bp);
6871
6872 bp->state = BNX2X_STATE_CLOSED;
228241eb 6873
6874 netif_carrier_off(bp->dev);
6875
6876 return 0;
6877}
6878
6879static void bnx2x_reset_task(struct work_struct *work)
6880{
6881 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6882
6883#ifdef BNX2X_STOP_ON_ERROR
6884 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6885 " so reset not done to allow debug dump,\n"
6886 KERN_ERR " you will need to reboot when done\n");
6887 return;
6888#endif
6889
6890 rtnl_lock();
6891
6892 if (!netif_running(bp->dev))
6893 goto reset_task_exit;
6894
6895 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6896 bnx2x_nic_load(bp, LOAD_NORMAL);
6897
6898reset_task_exit:
6899 rtnl_unlock();
6900}
6901
6902/* end of nic load/unload */
6903
6904/* ethtool_ops */
6905
6906/*
6907 * Init service functions
6908 */
6909
6910static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6911{
6912 u32 val;
6913
6914 /* Check if there is any driver already loaded */
6915 val = REG_RD(bp, MISC_REG_UNPREPARED);
6916 if (val == 0x1) {
6917 /* Check if it is the UNDI driver
6918 * UNDI driver initializes CID offset for normal bell to 0x7
6919 */
4a37fb66 6920 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6921 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6922 if (val == 0x7) {
6923 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6924 /* save our func */
34f80b04 6925 int func = BP_FUNC(bp);
6926 u32 swap_en;
6927 u32 swap_val;
34f80b04 6928
6929 /* clear the UNDI indication */
6930 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6931
6932 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6933
6934 /* try unload UNDI on port 0 */
6935 bp->func = 0;
6936 bp->fw_seq =
6937 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6938 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6939 reset_code = bnx2x_fw_command(bp, reset_code);
6940
6941 /* if UNDI is loaded on the other port */
6942 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6943
6944 /* send "DONE" for previous unload */
6945 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6946
6947 /* unload UNDI on port 1 */
34f80b04 6948 bp->func = 1;
6949 bp->fw_seq =
6950 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6951 DRV_MSG_SEQ_NUMBER_MASK);
6952 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6953
6954 bnx2x_fw_command(bp, reset_code);
6955 }
6956
6957 /* now it's safe to release the lock */
6958 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6959
6960 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6961 HC_REG_CONFIG_0), 0x1000);
6962
6963 /* close input traffic and wait for it */
6964 /* Do not rcv packets to BRB */
6965 REG_WR(bp,
6966 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6967 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6968 /* Do not direct rcv packets that are not for MCP to
6969 * the BRB */
6970 REG_WR(bp,
6971 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6972 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6973 /* clear AEU */
6974 REG_WR(bp,
6975 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6976 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6977 msleep(10);
6978
6979 /* save NIG port swap info */
6980 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6981 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6982 /* reset device */
6983 REG_WR(bp,
6984 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 6985 0xd3ffffff);
6986 REG_WR(bp,
6987 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6988 0x1403);
6989 /* take the NIG out of reset and restore swap values */
6990 REG_WR(bp,
6991 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6992 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6993 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6994 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6995
6996 /* send unload done to the MCP */
6997 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6998
6999 /* restore our func and fw_seq */
7000 bp->func = func;
7001 bp->fw_seq =
7002 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7003 DRV_MSG_SEQ_NUMBER_MASK);
7004
7005 } else
7006 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7007 }
7008}
7009
7010static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7011{
7012 u32 val, val2, val3, val4, id;
72ce58c3 7013 u16 pmc;
7014
7015 /* Get the chip revision id and number. */
7016 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7017 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7018 id = ((val & 0xffff) << 16);
7019 val = REG_RD(bp, MISC_REG_CHIP_REV);
7020 id |= ((val & 0xf) << 12);
7021 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7022 id |= ((val & 0xff) << 4);
5a40e08e 7023 val = REG_RD(bp, MISC_REG_BOND_ID);
7024 id |= (val & 0xf);
7025 bp->common.chip_id = id;
7026 bp->link_params.chip_id = bp->common.chip_id;
7027 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7028
7029 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7030 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7031 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7032 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7033 bp->common.flash_size, bp->common.flash_size);
7034
7035 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7036 bp->link_params.shmem_base = bp->common.shmem_base;
7037 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7038
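/* A valid shmem pointer must fall inside the 0xA0000-0xBFFFF window;
 * anything else means the MCP is not running.
 */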
7039 if (!bp->common.shmem_base ||
7040 (bp->common.shmem_base < 0xA0000) ||
7041 (bp->common.shmem_base >= 0xC0000)) {
7042 BNX2X_DEV_INFO("MCP not active\n");
7043 bp->flags |= NO_MCP_FLAG;
7044 return;
7045 }
7046
7047 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7048 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7049 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7050 BNX2X_ERR("BAD MCP validity signature\n");
7051
7052 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7053 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7054
7055 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
7056 bp->common.hw_config, bp->common.board);
7057
7058 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7059 SHARED_HW_CFG_LED_MODE_MASK) >>
7060 SHARED_HW_CFG_LED_MODE_SHIFT);
7061
7062 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7063 bp->common.bc_ver = val;
7064 BNX2X_DEV_INFO("bc_ver %X\n", val);
7065 if (val < BNX2X_BC_VER) {
 7066 /* for now only warn;
 7067 * later we may need to enforce this */
7068 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7069 " please upgrade BC\n", BNX2X_BC_VER, val);
7070 }
7071
7072 if (BP_E1HVN(bp) == 0) {
7073 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7074 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7075 } else {
7076 /* no WOL capability for E1HVN != 0 */
7077 bp->flags |= NO_WOL_FLAG;
7078 }
7079 BNX2X_DEV_INFO("%sWoL capable\n",
7080 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7081
7082 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7083 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7084 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7085 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7086
7087 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7088 val, val2, val3, val4);
7089}
7090
7091static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7092 u32 switch_cfg)
a2fbb9ea 7093{
34f80b04 7094 int port = BP_PORT(bp);
7095 u32 ext_phy_type;
7096
7097 switch (switch_cfg) {
7098 case SWITCH_CFG_1G:
7099 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7100
7101 ext_phy_type =
7102 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7103 switch (ext_phy_type) {
7104 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7105 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7106 ext_phy_type);
7107
7108 bp->port.supported |= (SUPPORTED_10baseT_Half |
7109 SUPPORTED_10baseT_Full |
7110 SUPPORTED_100baseT_Half |
7111 SUPPORTED_100baseT_Full |
7112 SUPPORTED_1000baseT_Full |
7113 SUPPORTED_2500baseX_Full |
7114 SUPPORTED_TP |
7115 SUPPORTED_FIBRE |
7116 SUPPORTED_Autoneg |
7117 SUPPORTED_Pause |
7118 SUPPORTED_Asym_Pause);
7119 break;
7120
7121 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7122 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7123 ext_phy_type);
7124
7125 bp->port.supported |= (SUPPORTED_10baseT_Half |
7126 SUPPORTED_10baseT_Full |
7127 SUPPORTED_100baseT_Half |
7128 SUPPORTED_100baseT_Full |
7129 SUPPORTED_1000baseT_Full |
7130 SUPPORTED_TP |
7131 SUPPORTED_FIBRE |
7132 SUPPORTED_Autoneg |
7133 SUPPORTED_Pause |
7134 SUPPORTED_Asym_Pause);
7135 break;
7136
7137 default:
7138 BNX2X_ERR("NVRAM config error. "
7139 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7140 bp->link_params.ext_phy_config);
7141 return;
7142 }
7143
7144 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7145 port*0x10);
7146 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7147 break;
7148
7149 case SWITCH_CFG_10G:
7150 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7151
7152 ext_phy_type =
7153 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7154 switch (ext_phy_type) {
7155 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7156 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7157 ext_phy_type);
7158
7159 bp->port.supported |= (SUPPORTED_10baseT_Half |
7160 SUPPORTED_10baseT_Full |
7161 SUPPORTED_100baseT_Half |
7162 SUPPORTED_100baseT_Full |
7163 SUPPORTED_1000baseT_Full |
7164 SUPPORTED_2500baseX_Full |
7165 SUPPORTED_10000baseT_Full |
7166 SUPPORTED_TP |
7167 SUPPORTED_FIBRE |
7168 SUPPORTED_Autoneg |
7169 SUPPORTED_Pause |
7170 SUPPORTED_Asym_Pause);
7171 break;
7172
7173 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7174 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7175 ext_phy_type);
f1410647 7176
7177 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7178 SUPPORTED_FIBRE |
7179 SUPPORTED_Pause |
7180 SUPPORTED_Asym_Pause);
7181 break;
7182
a2fbb9ea 7183 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7184 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7185 ext_phy_type);
7186
7187 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7188 SUPPORTED_1000baseT_Full |
7189 SUPPORTED_FIBRE |
7190 SUPPORTED_Pause |
7191 SUPPORTED_Asym_Pause);
7192 break;
7193
7194 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7195 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7196 ext_phy_type);
7197
7198 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7199 SUPPORTED_1000baseT_Full |
7200 SUPPORTED_FIBRE |
7201 SUPPORTED_Autoneg |
7202 SUPPORTED_Pause |
7203 SUPPORTED_Asym_Pause);
7204 break;
7205
7206 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7207 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7208 ext_phy_type);
7209
7210 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7211 SUPPORTED_2500baseX_Full |
7212 SUPPORTED_1000baseT_Full |
7213 SUPPORTED_FIBRE |
7214 SUPPORTED_Autoneg |
7215 SUPPORTED_Pause |
7216 SUPPORTED_Asym_Pause);
7217 break;
7218
7219 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7220 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7221 ext_phy_type);
7222
7223 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7224 SUPPORTED_TP |
7225 SUPPORTED_Autoneg |
7226 SUPPORTED_Pause |
7227 SUPPORTED_Asym_Pause);
7228 break;
7229
7230 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7231 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7232 bp->link_params.ext_phy_config);
7233 break;
7234
7235 default:
7236 BNX2X_ERR("NVRAM config error. "
7237 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7238 bp->link_params.ext_phy_config);
7239 return;
7240 }
7241
7242 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7243 port*0x18);
7244 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7245
7246 break;
7247
7248 default:
7249 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7250 bp->port.link_config);
7251 return;
7252 }
34f80b04 7253 bp->link_params.phy_addr = bp->port.phy_addr;
7254
7255 /* mask what we support according to speed_cap_mask */
7256 if (!(bp->link_params.speed_cap_mask &
7257 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7258 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7259
7260 if (!(bp->link_params.speed_cap_mask &
7261 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7262 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7263
7264 if (!(bp->link_params.speed_cap_mask &
7265 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7266 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7267
7268 if (!(bp->link_params.speed_cap_mask &
7269 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7270 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7271
7272 if (!(bp->link_params.speed_cap_mask &
7273 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7274 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7275 SUPPORTED_1000baseT_Full);
a2fbb9ea 7276
7277 if (!(bp->link_params.speed_cap_mask &
7278 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7279 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7280
7281 if (!(bp->link_params.speed_cap_mask &
7282 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7283 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7284
34f80b04 7285 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7286}
7287
34f80b04 7288static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7289{
c18487ee 7290 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7291
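/* Each requested speed below is honoured only if the corresponding bit
 * survived the speed_cap_mask filtering in bnx2x_link_settings_supported().
 */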
34f80b04 7292 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7293 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7294 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7295 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7296 bp->port.advertising = bp->port.supported;
a2fbb9ea 7297 } else {
7298 u32 ext_phy_type =
7299 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7300
7301 if ((ext_phy_type ==
7302 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7303 (ext_phy_type ==
7304 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7305 /* force 10G, no AN */
c18487ee 7306 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7307 bp->port.advertising =
7308 (ADVERTISED_10000baseT_Full |
7309 ADVERTISED_FIBRE);
7310 break;
7311 }
7312 BNX2X_ERR("NVRAM config error. "
7313 "Invalid link_config 0x%x"
7314 " Autoneg not supported\n",
34f80b04 7315 bp->port.link_config);
7316 return;
7317 }
7318 break;
7319
7320 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7321 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7322 bp->link_params.req_line_speed = SPEED_10;
7323 bp->port.advertising = (ADVERTISED_10baseT_Full |
7324 ADVERTISED_TP);
7325 } else {
7326 BNX2X_ERR("NVRAM config error. "
7327 "Invalid link_config 0x%x"
7328 " speed_cap_mask 0x%x\n",
34f80b04 7329 bp->port.link_config,
c18487ee 7330 bp->link_params.speed_cap_mask);
7331 return;
7332 }
7333 break;
7334
7335 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7336 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7337 bp->link_params.req_line_speed = SPEED_10;
7338 bp->link_params.req_duplex = DUPLEX_HALF;
7339 bp->port.advertising = (ADVERTISED_10baseT_Half |
7340 ADVERTISED_TP);
7341 } else {
7342 BNX2X_ERR("NVRAM config error. "
7343 "Invalid link_config 0x%x"
7344 " speed_cap_mask 0x%x\n",
34f80b04 7345 bp->port.link_config,
c18487ee 7346 bp->link_params.speed_cap_mask);
7347 return;
7348 }
7349 break;
7350
7351 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7352 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7353 bp->link_params.req_line_speed = SPEED_100;
7354 bp->port.advertising = (ADVERTISED_100baseT_Full |
7355 ADVERTISED_TP);
7356 } else {
7357 BNX2X_ERR("NVRAM config error. "
7358 "Invalid link_config 0x%x"
7359 " speed_cap_mask 0x%x\n",
34f80b04 7360 bp->port.link_config,
c18487ee 7361 bp->link_params.speed_cap_mask);
7362 return;
7363 }
7364 break;
7365
7366 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7367 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7368 bp->link_params.req_line_speed = SPEED_100;
7369 bp->link_params.req_duplex = DUPLEX_HALF;
7370 bp->port.advertising = (ADVERTISED_100baseT_Half |
7371 ADVERTISED_TP);
7372 } else {
7373 BNX2X_ERR("NVRAM config error. "
7374 "Invalid link_config 0x%x"
7375 " speed_cap_mask 0x%x\n",
34f80b04 7376 bp->port.link_config,
c18487ee 7377 bp->link_params.speed_cap_mask);
7378 return;
7379 }
7380 break;
7381
7382 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7383 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7384 bp->link_params.req_line_speed = SPEED_1000;
7385 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7386 ADVERTISED_TP);
7387 } else {
7388 BNX2X_ERR("NVRAM config error. "
7389 "Invalid link_config 0x%x"
7390 " speed_cap_mask 0x%x\n",
34f80b04 7391 bp->port.link_config,
c18487ee 7392 bp->link_params.speed_cap_mask);
7393 return;
7394 }
7395 break;
7396
7397 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7398 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7399 bp->link_params.req_line_speed = SPEED_2500;
7400 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7401 ADVERTISED_TP);
7402 } else {
7403 BNX2X_ERR("NVRAM config error. "
7404 "Invalid link_config 0x%x"
7405 " speed_cap_mask 0x%x\n",
34f80b04 7406 bp->port.link_config,
c18487ee 7407 bp->link_params.speed_cap_mask);
7408 return;
7409 }
7410 break;
7411
7412 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7413 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7414 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7415 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7416 bp->link_params.req_line_speed = SPEED_10000;
7417 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7418 ADVERTISED_FIBRE);
7419 } else {
7420 BNX2X_ERR("NVRAM config error. "
7421 "Invalid link_config 0x%x"
7422 " speed_cap_mask 0x%x\n",
34f80b04 7423 bp->port.link_config,
c18487ee 7424 bp->link_params.speed_cap_mask);
7425 return;
7426 }
7427 break;
7428
7429 default:
7430 BNX2X_ERR("NVRAM config error. "
7431 "BAD link speed link_config 0x%x\n",
34f80b04 7432 bp->port.link_config);
c18487ee 7433 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7434 bp->port.advertising = bp->port.supported;
7435 break;
7436 }
a2fbb9ea 7437
7438 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7439 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7440 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7441 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7442 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 7443
c18487ee 7444 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7445 " advertising 0x%x\n",
7446 bp->link_params.req_line_speed,
7447 bp->link_params.req_duplex,
34f80b04 7448 bp->link_params.req_flow_ctrl, bp->port.advertising);
7449}
7450
34f80b04 7451static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7452{
7453 int port = BP_PORT(bp);
7454 u32 val, val2;
a2fbb9ea 7455
c18487ee 7456 bp->link_params.bp = bp;
34f80b04 7457 bp->link_params.port = port;
c18487ee 7458
c18487ee 7459 bp->link_params.serdes_config =
f1410647 7460 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7461 bp->link_params.lane_config =
a2fbb9ea 7462 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7463 bp->link_params.ext_phy_config =
7464 SHMEM_RD(bp,
7465 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7466 bp->link_params.speed_cap_mask =
7467 SHMEM_RD(bp,
7468 dev_info.port_hw_config[port].speed_capability_mask);
7469
34f80b04 7470 bp->port.link_config =
7471 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7472
7473 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7474 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7475 " link_config 0x%08x\n",
7476 bp->link_params.serdes_config,
7477 bp->link_params.lane_config,
7478 bp->link_params.ext_phy_config,
34f80b04 7479 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7480
34f80b04 7481 bp->link_params.switch_cfg = (bp->port.link_config &
7482 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7483 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7484
7485 bnx2x_link_settings_requested(bp);
7486
7487 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7488 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7489 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7490 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7491 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7492 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7493 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7494 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7495 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7496 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7497}
7498
7499static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7500{
7501 int func = BP_FUNC(bp);
7502 u32 val, val2;
7503 int rc = 0;
a2fbb9ea 7504
34f80b04 7505 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7506
7507 bp->e1hov = 0;
7508 bp->e1hmf = 0;
7509 if (CHIP_IS_E1H(bp)) {
7510 bp->mf_config =
7511 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7512
7513 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7514 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 7515 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7516
7517 bp->e1hov = val;
7518 bp->e1hmf = 1;
7519 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7520 "(0x%04x)\n",
7521 func, bp->e1hov, bp->e1hov);
7522 } else {
7523 BNX2X_DEV_INFO("Single function mode\n");
7524 if (BP_E1HVN(bp)) {
7525 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7526 " aborting\n", func);
7527 rc = -EPERM;
7528 }
7529 }
7530 }
a2fbb9ea 7531
7532 if (!BP_NOMCP(bp)) {
7533 bnx2x_get_port_hwinfo(bp);
7534
7535 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7536 DRV_MSG_SEQ_NUMBER_MASK);
7537 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7538 }
7539
7540 if (IS_E1HMF(bp)) {
7541 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7542 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7543 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7544 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7545 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7546 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7547 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7548 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7549 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7550 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7551 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7552 ETH_ALEN);
7553 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7554 ETH_ALEN);
a2fbb9ea 7555 }
7556
7557 return rc;
7558 }
7559
7560 if (BP_NOMCP(bp)) {
7561 /* only supposed to happen on emulation/FPGA */
33471629 7562 BNX2X_ERR("warning random MAC workaround active\n");
7563 random_ether_addr(bp->dev->dev_addr);
7564 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7565 }
a2fbb9ea 7566
7567 return rc;
7568}
7569
7570static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7571{
7572 int func = BP_FUNC(bp);
7573 int rc;
7574
7575 /* Disable interrupt handling until HW is initialized */
7576 atomic_set(&bp->intr_sem, 1);
7577
34f80b04 7578 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7579
1cf167f2 7580 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7581 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7582
7583 rc = bnx2x_get_hwinfo(bp);
7584
7585 /* need to reset chip if undi was active */
7586 if (!BP_NOMCP(bp))
7587 bnx2x_undi_unload(bp);
7588
7589 if (CHIP_REV_IS_FPGA(bp))
7590 printk(KERN_ERR PFX "FPGA detected\n");
7591
7592 if (BP_NOMCP(bp) && (func == 0))
7593 printk(KERN_ERR PFX
7594 "MCP disabled, must load devices in order!\n");
7595
7596 /* Set TPA flags */
7597 if (disable_tpa) {
7598 bp->flags &= ~TPA_ENABLE_FLAG;
7599 bp->dev->features &= ~NETIF_F_LRO;
7600 } else {
7601 bp->flags |= TPA_ENABLE_FLAG;
7602 bp->dev->features |= NETIF_F_LRO;
7603 }
7604
7605
7606 bp->tx_ring_size = MAX_TX_AVAIL;
7607 bp->rx_ring_size = MAX_RX_AVAIL;
7608
7609 bp->rx_csum = 1;
7610 bp->rx_offset = 0;
7611
7612 bp->tx_ticks = 50;
7613 bp->rx_ticks = 25;
7614
7615 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7616 bp->current_interval = (poll ? poll : bp->timer_interval);
7617
7618 init_timer(&bp->timer);
7619 bp->timer.expires = jiffies + bp->current_interval;
7620 bp->timer.data = (unsigned long) bp;
7621 bp->timer.function = bnx2x_timer;
7622
7623 return rc;
7624}
7625
7626/*
7627 * ethtool service functions
7628 */
7629
7630/* All ethtool functions called with rtnl_lock */
7631
7632static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7633{
7634 struct bnx2x *bp = netdev_priv(dev);
7635
7636 cmd->supported = bp->port.supported;
7637 cmd->advertising = bp->port.advertising;
7638
7639 if (netif_carrier_ok(dev)) {
7640 cmd->speed = bp->link_vars.line_speed;
7641 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7642 } else {
7643 cmd->speed = bp->link_params.req_line_speed;
7644 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7645 }
7646 if (IS_E1HMF(bp)) {
7647 u16 vn_max_rate;
7648
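/* In multi-function mode the per-VN maximum bandwidth is stored in
 * units of 100 Mbps; never report a speed above the VN cap.
 */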
7649 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7650 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7651 if (vn_max_rate < cmd->speed)
7652 cmd->speed = vn_max_rate;
7653 }
a2fbb9ea 7654
7655 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7656 u32 ext_phy_type =
7657 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7658
7659 switch (ext_phy_type) {
7660 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7661 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7662 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7663 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7664 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7665 cmd->port = PORT_FIBRE;
7666 break;
7667
7668 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7669 cmd->port = PORT_TP;
7670 break;
7671
7672 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7673 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7674 bp->link_params.ext_phy_config);
7675 break;
7676
7677 default:
7678 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7679 bp->link_params.ext_phy_config);
7680 break;
7681 }
7682 } else
a2fbb9ea 7683 cmd->port = PORT_TP;
a2fbb9ea 7684
34f80b04 7685 cmd->phy_address = bp->port.phy_addr;
7686 cmd->transceiver = XCVR_INTERNAL;
7687
c18487ee 7688 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7689 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7690 else
a2fbb9ea 7691 cmd->autoneg = AUTONEG_DISABLE;
7692
7693 cmd->maxtxpkt = 0;
7694 cmd->maxrxpkt = 0;
7695
7696 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7697 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7698 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7699 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7700 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7701 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7702 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7703
7704 return 0;
7705}
7706
7707static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7708{
7709 struct bnx2x *bp = netdev_priv(dev);
7710 u32 advertising;
7711
7712 if (IS_E1HMF(bp))
7713 return 0;
7714
7715 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7716 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7717 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7718 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7719 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7720 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7721 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7722
a2fbb9ea 7723 if (cmd->autoneg == AUTONEG_ENABLE) {
7724 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7725 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7726 return -EINVAL;
f1410647 7727 }
7728
7729 /* advertise the requested speed and duplex if supported */
34f80b04 7730 cmd->advertising &= bp->port.supported;
a2fbb9ea 7731
7732 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7733 bp->link_params.req_duplex = DUPLEX_FULL;
7734 bp->port.advertising |= (ADVERTISED_Autoneg |
7735 cmd->advertising);
7736
7737 } else { /* forced speed */
7738 /* advertise the requested speed and duplex if supported */
7739 switch (cmd->speed) {
7740 case SPEED_10:
7741 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7742 if (!(bp->port.supported &
7743 SUPPORTED_10baseT_Full)) {
7744 DP(NETIF_MSG_LINK,
7745 "10M full not supported\n");
a2fbb9ea 7746 return -EINVAL;
f1410647 7747 }
7748
7749 advertising = (ADVERTISED_10baseT_Full |
7750 ADVERTISED_TP);
7751 } else {
34f80b04 7752 if (!(bp->port.supported &
7753 SUPPORTED_10baseT_Half)) {
7754 DP(NETIF_MSG_LINK,
7755 "10M half not supported\n");
a2fbb9ea 7756 return -EINVAL;
f1410647 7757 }
7758
7759 advertising = (ADVERTISED_10baseT_Half |
7760 ADVERTISED_TP);
7761 }
7762 break;
7763
7764 case SPEED_100:
7765 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7766 if (!(bp->port.supported &
7767 SUPPORTED_100baseT_Full)) {
7768 DP(NETIF_MSG_LINK,
7769 "100M full not supported\n");
a2fbb9ea 7770 return -EINVAL;
f1410647 7771 }
7772
7773 advertising = (ADVERTISED_100baseT_Full |
7774 ADVERTISED_TP);
7775 } else {
34f80b04 7776 if (!(bp->port.supported &
7777 SUPPORTED_100baseT_Half)) {
7778 DP(NETIF_MSG_LINK,
7779 "100M half not supported\n");
a2fbb9ea 7780 return -EINVAL;
f1410647 7781 }
7782
7783 advertising = (ADVERTISED_100baseT_Half |
7784 ADVERTISED_TP);
7785 }
7786 break;
7787
7788 case SPEED_1000:
7789 if (cmd->duplex != DUPLEX_FULL) {
7790 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7791 return -EINVAL;
f1410647 7792 }
a2fbb9ea 7793
34f80b04 7794 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7795 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7796 return -EINVAL;
f1410647 7797 }
7798
7799 advertising = (ADVERTISED_1000baseT_Full |
7800 ADVERTISED_TP);
7801 break;
7802
7803 case SPEED_2500:
7804 if (cmd->duplex != DUPLEX_FULL) {
7805 DP(NETIF_MSG_LINK,
7806 "2.5G half not supported\n");
a2fbb9ea 7807 return -EINVAL;
f1410647 7808 }
a2fbb9ea 7809
34f80b04 7810 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7811 DP(NETIF_MSG_LINK,
7812 "2.5G full not supported\n");
a2fbb9ea 7813 return -EINVAL;
f1410647 7814 }
a2fbb9ea 7815
f1410647 7816 advertising = (ADVERTISED_2500baseX_Full |
7817 ADVERTISED_TP);
7818 break;
7819
7820 case SPEED_10000:
7821 if (cmd->duplex != DUPLEX_FULL) {
7822 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7823 return -EINVAL;
f1410647 7824 }
a2fbb9ea 7825
34f80b04 7826 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7827 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7828 return -EINVAL;
f1410647 7829 }
7830
7831 advertising = (ADVERTISED_10000baseT_Full |
7832 ADVERTISED_FIBRE);
7833 break;
7834
7835 default:
f1410647 7836 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7837 return -EINVAL;
7838 }
7839
7840 bp->link_params.req_line_speed = cmd->speed;
7841 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7842 bp->port.advertising = advertising;
7843 }
7844
c18487ee 7845 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7846 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7847 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7848 bp->port.advertising);
a2fbb9ea 7849
34f80b04 7850 if (netif_running(dev)) {
bb2a0f7a 7851 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7852 bnx2x_link_set(bp);
7853 }
7854
7855 return 0;
7856}
7857
7858#define PHY_FW_VER_LEN 10
7859
7860static void bnx2x_get_drvinfo(struct net_device *dev,
7861 struct ethtool_drvinfo *info)
7862{
7863 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 7864 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
7865
7866 strcpy(info->driver, DRV_MODULE_NAME);
7867 strcpy(info->version, DRV_MODULE_VERSION);
7868
7869 phy_fw_ver[0] = '\0';
34f80b04 7870 if (bp->port.pmf) {
4a37fb66 7871 bnx2x_acquire_phy_lock(bp);
7872 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7873 (bp->state != BNX2X_STATE_CLOSED),
7874 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 7875 bnx2x_release_phy_lock(bp);
34f80b04 7876 }
c18487ee 7877
7878 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7879 (bp->common.bc_ver & 0xff0000) >> 16,
7880 (bp->common.bc_ver & 0xff00) >> 8,
7881 (bp->common.bc_ver & 0xff),
7882 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7883 strcpy(info->bus_info, pci_name(bp->pdev));
7884 info->n_stats = BNX2X_NUM_STATS;
7885 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7886 info->eedump_len = bp->common.flash_size;
7887 info->regdump_len = 0;
7888}
7889
7890static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7891{
7892 struct bnx2x *bp = netdev_priv(dev);
7893
7894 if (bp->flags & NO_WOL_FLAG) {
7895 wol->supported = 0;
7896 wol->wolopts = 0;
7897 } else {
7898 wol->supported = WAKE_MAGIC;
7899 if (bp->wol)
7900 wol->wolopts = WAKE_MAGIC;
7901 else
7902 wol->wolopts = 0;
7903 }
7904 memset(&wol->sopass, 0, sizeof(wol->sopass));
7905}
7906
7907static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7908{
7909 struct bnx2x *bp = netdev_priv(dev);
7910
7911 if (wol->wolopts & ~WAKE_MAGIC)
7912 return -EINVAL;
7913
7914 if (wol->wolopts & WAKE_MAGIC) {
7915 if (bp->flags & NO_WOL_FLAG)
7916 return -EINVAL;
7917
7918 bp->wol = 1;
34f80b04 7919 } else
a2fbb9ea 7920 bp->wol = 0;
34f80b04 7921
7922 return 0;
7923}
7924
7925static u32 bnx2x_get_msglevel(struct net_device *dev)
7926{
7927 struct bnx2x *bp = netdev_priv(dev);
7928
7929 return bp->msglevel;
7930}
7931
7932static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7933{
7934 struct bnx2x *bp = netdev_priv(dev);
7935
7936 if (capable(CAP_NET_ADMIN))
7937 bp->msglevel = level;
7938}
7939
7940static int bnx2x_nway_reset(struct net_device *dev)
7941{
7942 struct bnx2x *bp = netdev_priv(dev);
7943
7944 if (!bp->port.pmf)
7945 return 0;
a2fbb9ea 7946
34f80b04 7947 if (netif_running(dev)) {
bb2a0f7a 7948 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7949 bnx2x_link_set(bp);
7950 }
7951
7952 return 0;
7953}
7954
7955static int bnx2x_get_eeprom_len(struct net_device *dev)
7956{
7957 struct bnx2x *bp = netdev_priv(dev);
7958
34f80b04 7959 return bp->common.flash_size;
7960}
7961
7962static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7963{
34f80b04 7964 int port = BP_PORT(bp);
7965 int count, i;
7966 u32 val = 0;
7967
7968 /* adjust timeout for emulation/FPGA */
7969 count = NVRAM_TIMEOUT_COUNT;
7970 if (CHIP_REV_IS_SLOW(bp))
7971 count *= 100;
7972
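/* Each port owns a request SET/CLR bit pair in the SW arbitration
 * register; the grant shows up as the matching ARB bit.
 */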
7973 /* request access to nvram interface */
7974 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7975 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7976
7977 for (i = 0; i < count*10; i++) {
7978 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7979 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7980 break;
7981
7982 udelay(5);
7983 }
7984
7985 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7986 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7987 return -EBUSY;
7988 }
7989
7990 return 0;
7991}
7992
7993static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7994{
34f80b04 7995 int port = BP_PORT(bp);
7996 int count, i;
7997 u32 val = 0;
7998
7999 /* adjust timeout for emulation/FPGA */
8000 count = NVRAM_TIMEOUT_COUNT;
8001 if (CHIP_REV_IS_SLOW(bp))
8002 count *= 100;
8003
8004 /* relinquish nvram interface */
8005 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8006 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8007
8008 for (i = 0; i < count*10; i++) {
8009 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8010 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8011 break;
8012
8013 udelay(5);
8014 }
8015
8016 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8017 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8018 return -EBUSY;
8019 }
8020
8021 return 0;
8022}
8023
8024static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8025{
8026 u32 val;
8027
8028 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8029
8030 /* enable both bits, even on read */
8031 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8032 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8033 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8034}
8035
8036static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8037{
8038 u32 val;
8039
8040 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8041
8042 /* disable both bits, even after read */
8043 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8044 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8045 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8046}
8047
8048static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8049 u32 cmd_flags)
8050{
f1410647 8051 int count, i, rc;
8052 u32 val;
8053
8054 /* build the command word */
8055 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8056
8057 /* need to clear DONE bit separately */
8058 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8059
8060 /* address of the NVRAM to read from */
8061 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8062 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8063
8064 /* issue a read command */
8065 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8066
8067 /* adjust timeout for emulation/FPGA */
8068 count = NVRAM_TIMEOUT_COUNT;
8069 if (CHIP_REV_IS_SLOW(bp))
8070 count *= 100;
8071
8072 /* wait for completion */
8073 *ret_val = 0;
8074 rc = -EBUSY;
8075 for (i = 0; i < count; i++) {
8076 udelay(5);
8077 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8078
8079 if (val & MCPR_NVM_COMMAND_DONE) {
8080 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8081 /* we read nvram data in cpu order
8082 * but ethtool sees it as an array of bytes
8083 * converting to big-endian will do the work */
8084 val = cpu_to_be32(val);
8085 *ret_val = val;
8086 rc = 0;
8087 break;
8088 }
8089 }
8090
8091 return rc;
8092}
8093
8094static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8095 int buf_size)
8096{
8097 int rc;
8098 u32 cmd_flags;
8099 u32 val;
8100
8101 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8102 DP(BNX2X_MSG_NVM,
c14423fe 8103 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8104 offset, buf_size);
8105 return -EINVAL;
8106 }
8107
8108 if (offset + buf_size > bp->common.flash_size) {
8109 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8110 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8111 offset, buf_size, bp->common.flash_size);
8112 return -EINVAL;
8113 }
8114
8115 /* request access to nvram interface */
8116 rc = bnx2x_acquire_nvram_lock(bp);
8117 if (rc)
8118 return rc;
8119
8120 /* enable access to nvram interface */
8121 bnx2x_enable_nvram_access(bp);
8122
8123 /* read the first word(s) */
8124 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8125 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8126 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8127 memcpy(ret_buf, &val, 4);
8128
8129 /* advance to the next dword */
8130 offset += sizeof(u32);
8131 ret_buf += sizeof(u32);
8132 buf_size -= sizeof(u32);
8133 cmd_flags = 0;
8134 }
8135
8136 if (rc == 0) {
8137 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8138 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8139 memcpy(ret_buf, &val, 4);
8140 }
8141
8142 /* disable access to nvram interface */
8143 bnx2x_disable_nvram_access(bp);
8144 bnx2x_release_nvram_lock(bp);
8145
8146 return rc;
8147}
8148
8149static int bnx2x_get_eeprom(struct net_device *dev,
8150 struct ethtool_eeprom *eeprom, u8 *eebuf)
8151{
8152 struct bnx2x *bp = netdev_priv(dev);
8153 int rc;
8154
8155 if (!netif_running(dev))
8156 return -EAGAIN;
8157
34f80b04 8158 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8159 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8160 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8161 eeprom->len, eeprom->len);
8162
8163 /* parameters already validated in ethtool_get_eeprom */
8164
8165 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8166
8167 return rc;
8168}
8169
8170static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8171 u32 cmd_flags)
8172{
f1410647 8173 int count, i, rc;
a2fbb9ea
ET
8174
8175 /* build the command word */
8176 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8177
8178 /* need to clear DONE bit separately */
8179 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8180
8181 /* write the data */
8182 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8183
8184 /* address of the NVRAM to write to */
8185 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8186 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8187
8188 /* issue the write command */
8189 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8190
8191 /* adjust timeout for emulation/FPGA */
8192 count = NVRAM_TIMEOUT_COUNT;
8193 if (CHIP_REV_IS_SLOW(bp))
8194 count *= 100;
8195
8196 /* wait for completion */
8197 rc = -EBUSY;
8198 for (i = 0; i < count; i++) {
8199 udelay(5);
8200 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8201 if (val & MCPR_NVM_COMMAND_DONE) {
8202 rc = 0;
8203 break;
8204 }
8205 }
8206
8207 return rc;
8208}
8209
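/* bit position of byte 'offset' within its naturally aligned 32-bit word */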
f1410647 8210#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8211
8212static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8213 int buf_size)
8214{
8215 int rc;
8216 u32 cmd_flags;
8217 u32 align_offset;
8218 u32 val;
8219
8220 if (offset + buf_size > bp->common.flash_size) {
8221 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8222 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8223 offset, buf_size, bp->common.flash_size);
8224 return -EINVAL;
8225 }
8226
8227 /* request access to nvram interface */
8228 rc = bnx2x_acquire_nvram_lock(bp);
8229 if (rc)
8230 return rc;
8231
8232 /* enable access to nvram interface */
8233 bnx2x_enable_nvram_access(bp);
8234
8235 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8236 align_offset = (offset & ~0x03);
8237 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8238
8239 if (rc == 0) {
8240 val &= ~(0xff << BYTE_OFFSET(offset));
8241 val |= (*data_buf << BYTE_OFFSET(offset));
8242
8243 /* nvram data is returned as an array of bytes
8244 * convert it back to cpu order */
8245 val = be32_to_cpu(val);
8246
8247 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8248 cmd_flags);
8249 }
8250
8251 /* disable access to nvram interface */
8252 bnx2x_disable_nvram_access(bp);
8253 bnx2x_release_nvram_lock(bp);
8254
8255 return rc;
8256}
8257
8258static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8259 int buf_size)
8260{
8261 int rc;
8262 u32 cmd_flags;
8263 u32 val;
8264 u32 written_so_far;
8265
34f80b04 8266 if (buf_size == 1) /* ethtool */
a2fbb9ea 8267 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8268
8269 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8270 DP(BNX2X_MSG_NVM,
c14423fe 8271 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8272 offset, buf_size);
8273 return -EINVAL;
8274 }
8275
8276 if (offset + buf_size > bp->common.flash_size) {
8277 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8278 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8279 offset, buf_size, bp->common.flash_size);
8280 return -EINVAL;
8281 }
8282
8283 /* request access to nvram interface */
8284 rc = bnx2x_acquire_nvram_lock(bp);
8285 if (rc)
8286 return rc;
8287
8288 /* enable access to nvram interface */
8289 bnx2x_enable_nvram_access(bp);
8290
8291 written_so_far = 0;
8292 cmd_flags = MCPR_NVM_COMMAND_FIRST;
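 /* Raise FIRST on the opening dword of each NVRAM page and LAST on the
 * closing dword of a page (or of the buffer) so the flash is written in
 * complete page bursts.
 */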
8293 while ((written_so_far < buf_size) && (rc == 0)) {
8294 if (written_so_far == (buf_size - sizeof(u32)))
8295 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8296 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8297 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8298 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8299 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8300
8301 memcpy(&val, data_buf, 4);
8302
8303 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8304
8305 /* advance to the next dword */
8306 offset += sizeof(u32);
8307 data_buf += sizeof(u32);
8308 written_so_far += sizeof(u32);
8309 cmd_flags = 0;
8310 }
8311
8312 /* disable access to nvram interface */
8313 bnx2x_disable_nvram_access(bp);
8314 bnx2x_release_nvram_lock(bp);
8315
8316 return rc;
8317}
8318
8319static int bnx2x_set_eeprom(struct net_device *dev,
8320 struct ethtool_eeprom *eeprom, u8 *eebuf)
8321{
8322 struct bnx2x *bp = netdev_priv(dev);
8323 int rc;
8324
8325 if (!netif_running(dev))
8326 return -EAGAIN;
8327
34f80b04 8328 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8329 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8330 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8331 eeprom->len, eeprom->len);
8332
8333 /* parameters already validated in ethtool_set_eeprom */
8334
c18487ee 8335 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8336 if (eeprom->magic == 0x00504859)
8337 if (bp->port.pmf) {
8338
4a37fb66 8339 bnx2x_acquire_phy_lock(bp);
8340 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8341 bp->link_params.ext_phy_config,
8342 (bp->state != BNX2X_STATE_CLOSED),
8343 eebuf, eeprom->len);
8344 if ((bp->state == BNX2X_STATE_OPEN) ||
8345 (bp->state == BNX2X_STATE_DISABLED)) {
8346 rc |= bnx2x_link_reset(&bp->link_params,
8347 &bp->link_vars);
8348 rc |= bnx2x_phy_init(&bp->link_params,
8349 &bp->link_vars);
bb2a0f7a 8350 }
4a37fb66 8351 bnx2x_release_phy_lock(bp);
8352
8353 } else /* Only the PMF can access the PHY */
8354 return -EINVAL;
8355 else
c18487ee 8356 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8357
8358 return rc;
8359}
8360
8361static int bnx2x_get_coalesce(struct net_device *dev,
8362 struct ethtool_coalesce *coal)
8363{
8364 struct bnx2x *bp = netdev_priv(dev);
8365
8366 memset(coal, 0, sizeof(struct ethtool_coalesce));
8367
8368 coal->rx_coalesce_usecs = bp->rx_ticks;
8369 coal->tx_coalesce_usecs = bp->tx_ticks;
8370
8371 return 0;
8372}
8373
8374static int bnx2x_set_coalesce(struct net_device *dev,
8375 struct ethtool_coalesce *coal)
8376{
8377 struct bnx2x *bp = netdev_priv(dev);
8378
8379 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8380 if (bp->rx_ticks > 3000)
8381 bp->rx_ticks = 3000;
8382
8383 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8384 if (bp->tx_ticks > 0x3000)
8385 bp->tx_ticks = 0x3000;
8386
34f80b04 8387 if (netif_running(dev))
8388 bnx2x_update_coalesce(bp);
8389
8390 return 0;
8391}
8392
8393static void bnx2x_get_ringparam(struct net_device *dev,
8394 struct ethtool_ringparam *ering)
8395{
8396 struct bnx2x *bp = netdev_priv(dev);
8397
8398 ering->rx_max_pending = MAX_RX_AVAIL;
8399 ering->rx_mini_max_pending = 0;
8400 ering->rx_jumbo_max_pending = 0;
8401
8402 ering->rx_pending = bp->rx_ring_size;
8403 ering->rx_mini_pending = 0;
8404 ering->rx_jumbo_pending = 0;
8405
8406 ering->tx_max_pending = MAX_TX_AVAIL;
8407 ering->tx_pending = bp->tx_ring_size;
8408}
8409
8410static int bnx2x_set_ringparam(struct net_device *dev,
8411 struct ethtool_ringparam *ering)
8412{
8413 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8414 int rc = 0;
8415
8416 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8417 (ering->tx_pending > MAX_TX_AVAIL) ||
8418 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8419 return -EINVAL;
8420
8421 bp->rx_ring_size = ering->rx_pending;
8422 bp->tx_ring_size = ering->tx_pending;
8423
8424 if (netif_running(dev)) {
8425 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8426 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8427 }
8428
34f80b04 8429 return rc;
8430}
8431
8432static void bnx2x_get_pauseparam(struct net_device *dev,
8433 struct ethtool_pauseparam *epause)
8434{
8435 struct bnx2x *bp = netdev_priv(dev);
8436
c0700f90 8437 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8438 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8439
8440 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8441 BNX2X_FLOW_CTRL_RX);
8442 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8443 BNX2X_FLOW_CTRL_TX);
8444
8445 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8446 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8447 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8448}
8449
8450static int bnx2x_set_pauseparam(struct net_device *dev,
8451 struct ethtool_pauseparam *epause)
8452{
8453 struct bnx2x *bp = netdev_priv(dev);
8454
8455 if (IS_E1HMF(bp))
8456 return 0;
8457
8458 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8459 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8460 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8461
c0700f90 8462 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 8463
f1410647 8464 if (epause->rx_pause)
c0700f90 8465 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 8466
f1410647 8467 if (epause->tx_pause)
c0700f90 8468 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 8469
8470 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8471 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8472
c18487ee 8473 if (epause->autoneg) {
34f80b04 8474 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 8475 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8476 return -EINVAL;
8477 }
a2fbb9ea 8478
c18487ee 8479 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 8480 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 8481 }
a2fbb9ea 8482
8483 DP(NETIF_MSG_LINK,
8484 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8485
8486 if (netif_running(dev)) {
bb2a0f7a 8487 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8488 bnx2x_link_set(bp);
8489 }
8490
8491 return 0;
8492}
8493
8494static int bnx2x_set_flags(struct net_device *dev, u32 data)
8495{
8496 struct bnx2x *bp = netdev_priv(dev);
8497 int changed = 0;
8498 int rc = 0;
8499
8500 /* TPA requires Rx CSUM offloading */
8501 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8502 if (!(dev->features & NETIF_F_LRO)) {
8503 dev->features |= NETIF_F_LRO;
8504 bp->flags |= TPA_ENABLE_FLAG;
8505 changed = 1;
8506 }
8507
8508 } else if (dev->features & NETIF_F_LRO) {
8509 dev->features &= ~NETIF_F_LRO;
8510 bp->flags &= ~TPA_ENABLE_FLAG;
8511 changed = 1;
8512 }
8513
8514 if (changed && netif_running(dev)) {
8515 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8516 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8517 }
8518
8519 return rc;
8520}
8521
8522static u32 bnx2x_get_rx_csum(struct net_device *dev)
8523{
8524 struct bnx2x *bp = netdev_priv(dev);
8525
8526 return bp->rx_csum;
8527}
8528
8529static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8530{
8531 struct bnx2x *bp = netdev_priv(dev);
df0f2343 8532 int rc = 0;
8533
8534 bp->rx_csum = data;
8535
8536 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8537 TPA'ed packets will be discarded due to wrong TCP CSUM */
8538 if (!data) {
8539 u32 flags = ethtool_op_get_flags(dev);
8540
8541 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8542 }
8543
8544 return rc;
8545}
8546
8547static int bnx2x_set_tso(struct net_device *dev, u32 data)
8548{
755735eb 8549 if (data) {
a2fbb9ea 8550 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8551 dev->features |= NETIF_F_TSO6;
8552 } else {
a2fbb9ea 8553 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8554 dev->features &= ~NETIF_F_TSO6;
8555 }
8556
a2fbb9ea
ET
8557 return 0;
8558}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" },
	{ "MC errors (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   first by writing 0x00000000, then by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register value */
			REG_WR(bp, offset, save_val);

			/* verify the value read back is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);

	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
		u16 cnt = 1000;
		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		/* wait until link state is restored */
		if (link_up)
			while (cnt-- && bnx2x_test_link(&bp->link_params,
							&bp->link_vars))
				msleep(10);
	} else
		return -EINVAL;

	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	u32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
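
/*
 * Editor's note on CRC32_RESIDUAL: each nvram_tbl[] region is stored with
 * its little-endian CRC32 appended, and the CRC register value obtained by
 * running the same CRC over data-plus-CRC of an intact region is the fixed
 * residue 0xdebb20e3, so no per-region expected checksum needs storing.
 * A user-space sketch of the equivalent check (hypothetical helper; zlib's
 * crc32() applies a final xor, hence the ^ 0xffffffff):
 *
 *	#include <zlib.h>
 *	static int region_ok(const unsigned char *buf, size_t len_with_crc)
 *	{
 *		return (crc32(0L, buf, len_with_crc) ^ 0xffffffff)
 *							== 0xdebb20e3;
 *	}
 */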

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length_6b = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	buf[7] = bnx2x_mc_assert(bp);
	if (buf[7] != 0)
		etest->flags |= ETH_TEST_FL_FAILED;

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi),
				8, STATS_FLAGS_FUNC, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_FUNC, "rx_error_bytes" },
	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_FUNC, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_FUNC, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(jabber_packets_received),
				4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_xon_frames" },
	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_xoff_frames" },
	{ STATS_OFFSET32(tx_stat_outxonsent_hi),
				8, STATS_FLAGS_PORT, "tx_xon_frames" },
	{ STATS_OFFSET32(tx_stat_outxoffsent_hi),
				8, STATS_FLAGS_PORT, "tx_xoff_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(no_buff_discard),
				4, STATS_FLAGS_FUNC, "rx_discards" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "brb_truncate" },
/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards" },
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
/* 42 */{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
};

#define IS_NOT_E1HMF_STAT(bp, i) \
		(IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_NOT_E1HMF_STAT(bp, i))
				continue;
			strcpy(buf + j*ETH_GSTRING_LEN,
			       bnx2x_stats_arr[i].string);
			j++;
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats = 0;

	for (i = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;
		num_stats++;
	}
	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats = (u32 *)&bp->eth_stats;
	int i, j;

	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;

		if (bnx2x_stats_arr[i].size == 0) {
			/* skip this counter */
			buf[j] = 0;
			j++;
			continue;
		}
		if (bnx2x_stats_arr[i].size == 4) {
			/* 4-byte counter */
			buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
			j++;
			continue;
		}
		/* 8-byte counter */
		buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
				  *(hw_stats + bnx2x_stats_arr[i].offset + 1));
		j++;
	}
}
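
/*
 * Editor's note: the storm firmware keeps 64-bit counters as two 32-bit
 * words with the high word first, so HILO_U64() is presumably along the
 * lines of
 *
 *	#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))
 *
 * e.g. hi = 0x00000002, lo = 0x00000010 packs to 0x0000000200000010
 * (8589934608) in buf[j].
 */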

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
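
/*
 * Editor's note (inferred from the ring macros): the last slot of each RCQ
 * page is a next-page pointer rather than a real completion, so a hardware
 * producer index sitting exactly on such a slot is bumped past it before
 * being compared with the driver's consumer.  With MAX_RCQ_DESC_CNT == 127
 * (an assumption for illustration):
 *
 *	rx_cons_sb = 127  ->  compared as 128 (skips the link element)
 *	rx_cons_sb = 128  ->  compared as 128
 */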

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp, budget);

	if (bnx2x_has_rx_work(fp))
		work_done = bnx2x_rx_int(fp, budget);
	rmb(); /* BNX2X_HAS_WORK() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		netif_rx_complete(napi);

		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}
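
/*
 * Editor's note on the ordering above: the rmb() forces BNX2X_HAS_WORK()
 * to re-read the status block only after the Rx/Tx processing is done, so
 * if new work races in between the final check and netif_rx_complete(),
 * the IGU_INT_ENABLE ack should re-raise the interrupt -- the usual NAPI
 * "re-check after re-enabling" pattern.
 */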


/* we split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers.
 * We use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping;
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}
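
/*
 * Worked example (hypothetical numbers): a TSO skb with a 1000-byte linear
 * part and hlen == 66 leaves bnx2x_tx_split() with
 *
 *	h_tx_bd->nbytes = 66,  addr = mapping
 *	d_tx_bd->nbytes = 934, addr = mapping + 66
 *
 * i.e. both BDs point into the single pci_map_single() region, which is
 * why completion unmaps only the BD marked as start.
 */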

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
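
/*
 * Editor's example (illustrative): with CHECKSUM_PARTIAL the stack's
 * partial sum may start before the transport header while the chip sums
 * from the transport header itself, so a sum that starts `fix' bytes too
 * early is repaired by folding those bytes back out:
 *
 *	new = ~csum_fold(csum_sub(old, csum_partial(th - fix, fix, 0)));
 *
 * swab16() then converts the folded result into the byte order the BD
 * expects.
 */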

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == ntohs(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}

		} else {
			/* a non-LSO packet that is too fragmented should
			   always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
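
/*
 * Worked example for the window check above (sizes are assumptions): the
 * FW cannot fetch more than MAX_FETCH_BD BDs for a single frame, so every
 * run of (MAX_FETCH_BD - 3) consecutive frags must carry at least lso_mss
 * bytes of payload.  With wnd_size == 8, lso_mss == 1460 and eight
 * 100-byte frags, wnd_sum == 800 < 1460 and the skb gets linearized
 * before transmission.
 */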

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = (smp_processor_id() % bp->num_queues);
	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		bp->eth_stats.driver_xoff++;
		netif_stop_queue(dev);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb
	   (due to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
#endif
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data = (hlen |
				    ((skb->protocol == ntohs(ETH_P_8021Q)) <<
				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d offset %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_stop_queue(dev);
		bp->eth_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_wake_queue(dev);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length_6b;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length_6b = i;
			config->hdr.offset = offset;
			config->hdr.client_id = BP_CL_ID(bp);
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
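
/*
 * Editor's sketch of the E1H imperfect multicast filter above: each
 * address selects one bit in a 256-bit hash from the top byte of its
 * little-endian CRC32C (values below are hypothetical):
 *
 *	crc    = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);	// say 0xa7...
 *	bit    = (crc >> 24) & 0xff;	// 0xa7 = 167
 *	regidx = bit >> 5;		// 5 -> sixth MC_HASH register
 *	bit   &= 0x1f;			// 7 -> bit 7 of that register
 *	mc_filter[regidx] |= (1 << bit);
 *
 * Distinct addresses can alias onto one bit, so the filter is best-effort
 * and may let extra multicasts through to the stack.
 */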

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
10293
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* returns 1 for a 2.5GHz link, 2 for a 5GHz (Gen2) link */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev is zeroed by alloc_etherdev() */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bp->common.name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, bp->common.name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

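/* PM suspend: everything runs under rtnl_lock.  Config space is saved
 * unconditionally; if the interface is up, the NIC is unloaded with
 * UNLOAD_CLOSE and the device dropped to the PM core's chosen
 * low-power state.
 */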
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

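/* PM resume mirrors suspend: restore config space, return to D0 and,
 * if the interface was up, reload the NIC with LOAD_OPEN.
 */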
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

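/* Cut-down unload path used from the PCI error handlers: it stops the
 * datapath and frees driver resources without the usual firmware unload
 * handshake, on the assumption that the device may no longer be
 * reachable after a bus error.
 */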
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length_6b; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

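/* Re-discover the shared-memory base after a slot reset and re-validate
 * the MCP signature; if the MCP looks dead the driver continues with
 * NO_MCP_FLAG set.
 */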
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us
 * that it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

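/* bnx2x_wq is a single-threaded workqueue shared by all bnx2x devices;
 * it presumably serializes the driver's slowpath work items.
 */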
static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);