/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.26"
#define DRV_MODULE_RELDATE      "2009/01/26"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

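/* DMA len32 dwords from host memory at dma_addr to GRC address dst_addr.
 * Falls back to indirect writes while DMAE is not yet ready; otherwise
 * posts a single command under dmae_mutex and polls wb_comp until the
 * DMAE engine signals completion.
 */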
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

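/* Counterpart of bnx2x_write_dmae(): DMA len32 dwords from GRC address
 * src_addr into the slowpath wb_data buffer, using indirect register
 * reads while DMAE is not yet ready.
 */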
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

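/* Scan the assert lists of the four storm processors (X/T/C/U) and log
 * every valid entry; returns the number of asserts found.
 */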
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

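/* Dump the MCP scratch-pad log to the kernel log; 'mark' is read from
 * the scratch-pad itself, so the buffer is printed in two wrapped halves.
 */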
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

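/* Program the HC configuration register of the current port for either
 * single-ISR (INTA) or MSI-X operation; on E1H also set up the
 * leading/trailing edge attention masks.
 */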
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

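/* Refresh the cached CSTORM/USTORM indices from the fastpath status
 * block; returns a bitmask (bit 0 - CSTORM, bit 1 - USTORM) of the
 * indices that changed.
 */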
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 tx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
        return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

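/* Number of Tx BDs still available on the ring; the NUM_TX_RINGS
 * "next-page" BDs are counted as used so they act as a threshold.
 */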
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

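/* Tx completion: free skbs from sw_cons up to the hw_cons reported in
 * the status block (at most 'work' packets) and wake the queue if it
 * was stopped and enough BDs were released.
 */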
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}


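/* Handle a slowpath completion (ramrod) reported on the Rx CQ: advance
 * the per-fastpath or the global state machine according to which
 * command completed.
 */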
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

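/* Allocate and map a PAGES_PER_SGE page chunk for the given SGE ring
 * slot; returns -ENOMEM on allocation or mapping failure.
 */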
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

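/* Advance the SGE producer after an aggregation CQE: clear the mask
 * bits of the pages this CQE consumed, then move rx_sge_prod over every
 * fully consumed 64-bit mask element.
 */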
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

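/* Open a TPA aggregation bin: the skb currently on the ring is parked
 * in the bin and the bin's spare skb is mapped onto the producer BD in
 * its place.
 */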
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

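/* Attach the SGE pages listed in the CQE to the skb as page fragments,
 * allocating a substitute page for each ring slot consumed; also sets
 * gso_size so the aggregated packet can be forwarded.
 */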
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                                max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                skb_record_rx_queue(skb, queue);

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take VLAN tag into an account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since FW will
         * assume BDs must have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}

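/* Rx fastpath loop: process up to 'budget' CQEs, dispatching slowpath
 * events, TPA start/stop aggregations and regular packets, then publish
 * the new BD, CQE and SGE producers to the chip.
 */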
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        /* CQ "next element" is of the size of the regular element,
           that's why it's ok here */
        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        bd_prod_fw = bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
           FP_IDX(fp), hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                struct sw_rx_bd *rx_buf = NULL;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;
                u8 cqe_fp_flags;
                u16 len, pad;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                cqe = &fp->rx_comp_ring[comp_ring_cons];
                cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

                DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
                   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
                   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
                   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
                   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
                   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

                /* is this a slowpath msg? */
                if (unlikely(CQE_TYPE(cqe_fp_flags))) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;
                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        /* If CQE is marked both TPA_START and TPA_END
                           it is a non-TPA CQE */
                        if ((!fp->disable_tpa) &&
                            (TPA_TYPE(cqe_fp_flags) !=
                                        (TPA_TYPE_START | TPA_TYPE_END))) {
                                u16 queue = cqe->fast_path_cqe.queue_index;

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_start on queue %d\n",
                                           queue);

                                        bnx2x_tpa_start(fp, queue, skb,
                                                        bd_cons, bd_prod);
                                        goto next_rx;
                                }

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_stop on queue %d\n",
                                           queue);

                                        if (!BNX2X_RX_SUM_FIX(cqe))
                                                BNX2X_ERR("STOP on none TCP "
                                                          "data\n");

                                        /* This is a size of the linear data
                                           on this skb */
                                        len = le16_to_cpu(cqe->fast_path_cqe.
                                                                len_on_bd);
                                        bnx2x_tpa_stop(bp, fp, queue, pad,
                                                    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
                                        if (bp->panic)
                                                return -EINVAL;
#endif

                                        bnx2x_update_sge_prod(fp,
                                                        &cqe->fast_path_cqe);
                                        goto next_cqe;
                                }
                        }

                        pci_dma_sync_single_for_device(bp->pdev,
                                        pci_unmap_addr(rx_buf, mapping),
                                                       pad + RX_COPY_THRESH,
                                                       PCI_DMA_FROMDEVICE);
                        prefetch(skb);
                        prefetch(((char *)(skb)) + 128);

                        /* is this an error packet? */
                        if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR  flags %x  rx packet %u\n",
                                   cqe_fp_flags, sw_comp_cons);
                                bp->eth_stats.rx_err_discard_pkt++;
                                goto reuse_rx;
                        }

                        /* Since we don't have a jumbo ring
                         * copy small packets if mtu > 1500
                         */
                        if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
                            (len <= RX_COPY_THRESH)) {
                                struct sk_buff *new_skb;

                                new_skb = netdev_alloc_skb(bp->dev,
                                                           len + pad);
                                if (new_skb == NULL) {
                                        DP(NETIF_MSG_RX_ERR,
                                           "ERROR  packet dropped "
                                           "because of alloc failure\n");
                                        bp->eth_stats.rx_skb_alloc_failed++;
                                        goto reuse_rx;
                                }

                                /* aligned copy */
                                skb_copy_from_linear_data_offset(skb, pad,
                                                    new_skb->data + pad, len);
                                skb_reserve(new_skb, pad);
                                skb_put(new_skb, len);

                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

                                skb = new_skb;

                        } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
                                pci_unmap_single(bp->pdev,
                                        pci_unmap_addr(rx_buf, mapping),
                                                 bp->rx_buf_size,
                                                 PCI_DMA_FROMDEVICE);
                                skb_reserve(skb, pad);
                                skb_put(skb, len);

                        } else {
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR  packet dropped because "
                                   "of alloc failure\n");
                                bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
                                goto next_rx;
                        }

                        skb->protocol = eth_type_trans(skb, bp->dev);

                        skb->ip_summed = CHECKSUM_NONE;
                        if (bp->rx_csum) {
                                if (likely(BNX2X_RX_CSUM_OK(cqe)))
                                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                                else
                                        bp->eth_stats.hw_csum_err++;
                        }
                }

#ifdef BCM_VLAN
                if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
                    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                     PARSING_FLAGS_VLAN))
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
                else
#endif
                        netif_receive_skb(skb);


next_rx:
                rx_buf->skb = NULL;

                bd_cons = NEXT_RX_IDX(bd_cons);
                bd_prod = NEXT_RX_IDX(bd_prod);
                bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
                rx_pkt++;
next_cqe:
                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

                if (rx_pkt == budget)
                        break;
        } /* while */

        fp->rx_bd_cons = bd_cons;
        fp->rx_bd_prod = bd_prod_fw;
        fp->rx_comp_cons = sw_comp_cons;
        fp->rx_comp_prod = sw_comp_prod;

        /* Update producers */
        bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
                             fp->rx_sge_prod);

        fp->rx_pkt += rx_pkt;
        fp->rx_calls++;

        return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
        struct bnx2x_fastpath *fp = fp_cookie;
        struct bnx2x *bp = fp->bp;
        int index = FP_IDX(fp);

        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

        DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
           index, FP_SB_ID(fp));
        bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        prefetch(fp->rx_cons_sb);
        prefetch(fp->tx_cons_sb);
        prefetch(&fp->status_blk->c_status_block.status_block_index);
        prefetch(&fp->status_blk->u_status_block.status_block_index);

        napi_schedule(&bnx2x_fp(bp, index, napi));

        return IRQ_HANDLED;
}

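/* INTA/MSI handler: ack the IGU, schedule NAPI if the fastpath status
 * bit is set and queue the slowpath task for the attention bit.
 */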
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2x *bp = netdev_priv(dev);
        u16 status = bnx2x_ack_int(bp);
        u16 mask;

        /* Return here if interrupt is shared and it's not for us */
        if (unlikely(status == 0)) {
                DP(NETIF_MSG_INTR, "not our interrupt!\n");
                return IRQ_NONE;
        }
        DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        mask = 0x2 << bp->fp[0].sb_id;
        if (status & mask) {
                struct bnx2x_fastpath *fp = &bp->fp[0];

                prefetch(fp->rx_cons_sb);
                prefetch(fp->tx_cons_sb);
                prefetch(&fp->status_blk->c_status_block.status_block_index);
                prefetch(&fp->status_blk->u_status_block.status_block_index);

                napi_schedule(&bnx2x_fp(bp, 0, napi));

                status &= ~mask;
        }


        if (unlikely(status & 0x1)) {
                queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

                status &= ~0x1;
                if (!status)
                        return IRQ_HANDLED;
        }

        if (status)
                DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
                   status);

        return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

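/* Acquire one bit of the per-function HW resource lock through the MISC
 * driver-control registers, retrying every 5 ms for up to 5 seconds.
 */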
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;
        int cnt;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        if (func <= 5) {
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        } else {
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
        }

        /* Validating that the resource is not already taken */
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit) {
                DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EEXIST;
        }

        /* Try for 5 second every 5ms */
        for (cnt = 0; cnt < 1000; cnt++) {
                /* Try to acquire the lock */
                REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
                lock_status = REG_RD(bp, hw_lock_control_reg);
                if (lock_status & resource_bit)
                        return 0;

                msleep(5);
        }
        DP(NETIF_MSG_HW, "Timeout\n");
        return -EAGAIN;
}

4a37fb66 1773static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1774{
1775 u32 lock_status;
1776 u32 resource_bit = (1 << resource);
4a37fb66
YG
1777 int func = BP_FUNC(bp);
1778 u32 hw_lock_control_reg;
a2fbb9ea 1779
c18487ee
YR
1780 /* Validating that the resource is within range */
1781 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1782 DP(NETIF_MSG_HW,
1783 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1784 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1785 return -EINVAL;
1786 }
1787
4a37fb66
YG
1788 if (func <= 5) {
1789 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1790 } else {
1791 hw_lock_control_reg =
1792 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1793 }
1794
c18487ee 1795 /* Validating that the resource is currently taken */
4a37fb66 1796 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1797 if (!(lock_status & resource_bit)) {
1798 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1799 lock_status, resource_bit);
1800 return -EFAULT;
a2fbb9ea
ET
1801 }
1802
4a37fb66 1803 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1804 return 0;
1805}
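/*
 * Illustrative usage sketch (editor's note, not part of the original
 * source): callers are expected to bracket access to a shared resource
 * with the acquire/release pair above, e.g.:
 *
 *	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO) == 0) {
 *		... touch the shared GPIO registers ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 *
 * bnx2x_acquire_hw_lock() polls for up to 5 seconds (1000 tries, 5ms
 * apart) and returns -EAGAIN on timeout, so a caller may retry later.
 */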
1806
1807/* HW Lock for shared dual port PHYs */
4a37fb66 1808static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee
YR
1809{
1810 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1811
34f80b04 1812 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1813
c18487ee
YR
1814 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1815 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1816 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
c18487ee 1817}
a2fbb9ea 1818
4a37fb66 1819static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee
YR
1820{
1821 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1822
c18487ee
YR
1823 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1824 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1825 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1826
34f80b04 1827 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1828}
a2fbb9ea 1829
17de50b7 1830int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1831{
1832 /* The GPIO should be swapped if swap register is set and active */
1833 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1834 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1835 int gpio_shift = gpio_num +
1836 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1837 u32 gpio_mask = (1 << gpio_shift);
1838 u32 gpio_reg;
a2fbb9ea 1839
c18487ee
YR
1840 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1841 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1842 return -EINVAL;
1843 }
a2fbb9ea 1844
4a37fb66 1845 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
 1846 /* read GPIO and mask out all but the FLOAT bits */
1847 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1848
c18487ee
YR
1849 switch (mode) {
1850 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1851 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1852 gpio_num, gpio_shift);
1853 /* clear FLOAT and set CLR */
1854 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1855 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1856 break;
a2fbb9ea 1857
c18487ee
YR
1858 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1859 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1860 gpio_num, gpio_shift);
1861 /* clear FLOAT and set SET */
1862 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1863 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1864 break;
a2fbb9ea 1865
17de50b7 1866 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1867 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1868 gpio_num, gpio_shift);
1869 /* set FLOAT */
1870 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1871 break;
a2fbb9ea 1872
c18487ee
YR
1873 default:
1874 break;
a2fbb9ea
ET
1875 }
1876
c18487ee 1877 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1878 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1879
c18487ee 1880 return 0;
a2fbb9ea
ET
1881}
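/*
 * Illustrative example (editor's note, not part of the original
 * source): the fan-failure handler later in this file drives the PHY
 * reset line low with this helper:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 *
 * The helper serializes against other functions via the GPIO HW lock,
 * so it is safe to call from either port.
 */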
1882
c18487ee 1883static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1884{
c18487ee
YR
1885 u32 spio_mask = (1 << spio_num);
1886 u32 spio_reg;
a2fbb9ea 1887
c18487ee
YR
1888 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1889 (spio_num > MISC_REGISTERS_SPIO_7)) {
1890 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1891 return -EINVAL;
a2fbb9ea
ET
1892 }
1893
4a37fb66 1894 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1895 /* read SPIO and mask except the float bits */
1896 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1897
c18487ee 1898 switch (mode) {
6378c025 1899 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1900 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1901 /* clear FLOAT and set CLR */
1902 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1903 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1904 break;
a2fbb9ea 1905
6378c025 1906 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1907 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1908 /* clear FLOAT and set SET */
1909 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1910 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1911 break;
a2fbb9ea 1912
c18487ee
YR
1913 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1914 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1915 /* set FLOAT */
1916 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1917 break;
a2fbb9ea 1918
c18487ee
YR
1919 default:
1920 break;
a2fbb9ea
ET
1921 }
1922
c18487ee 1923 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1924 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1925
a2fbb9ea
ET
1926 return 0;
1927}
1928
c18487ee 1929static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1930{
ad33ea3a
EG
1931 switch (bp->link_vars.ieee_fc &
1932 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1933 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1934 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1935 ADVERTISED_Pause);
1936 break;
1937 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1938 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1939 ADVERTISED_Pause);
1940 break;
1941 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1942 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
1943 break;
1944 default:
34f80b04 1945 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1946 ADVERTISED_Pause);
1947 break;
1948 }
1949}
f1410647 1950
c18487ee
YR
1951static void bnx2x_link_report(struct bnx2x *bp)
1952{
1953 if (bp->link_vars.link_up) {
1954 if (bp->state == BNX2X_STATE_OPEN)
1955 netif_carrier_on(bp->dev);
1956 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1957
c18487ee 1958 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1959
c18487ee
YR
1960 if (bp->link_vars.duplex == DUPLEX_FULL)
1961 printk("full duplex");
1962 else
1963 printk("half duplex");
f1410647 1964
c0700f90
DM
1965 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1966 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 1967 printk(", receive ");
c0700f90 1968 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
c18487ee
YR
1969 printk("& transmit ");
1970 } else {
1971 printk(", transmit ");
1972 }
1973 printk("flow control ON");
1974 }
1975 printk("\n");
f1410647 1976
c18487ee
YR
1977 } else { /* link_down */
1978 netif_carrier_off(bp->dev);
1979 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1980 }
c18487ee
YR
1981}
1982
1983static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1984{
19680c48
EG
1985 if (!BP_NOMCP(bp)) {
1986 u8 rc;
a2fbb9ea 1987
19680c48 1988 /* Initialize link parameters structure variables */
8c99e7b0
YR
1989 /* It is recommended to turn off RX FC for jumbo frames
1990 for better performance */
1991 if (IS_E1HMF(bp))
c0700f90 1992 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 1993 else if (bp->dev->mtu > 5000)
c0700f90 1994 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1995 else
c0700f90 1996 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1997
4a37fb66 1998 bnx2x_acquire_phy_lock(bp);
19680c48 1999 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2000 bnx2x_release_phy_lock(bp);
a2fbb9ea 2001
3c96c68b
EG
2002 bnx2x_calc_fc_adv(bp);
2003
19680c48
EG
2004 if (bp->link_vars.link_up)
2005 bnx2x_link_report(bp);
a2fbb9ea 2006
34f80b04 2007
19680c48
EG
2008 return rc;
2009 }
 2010 BNX2X_ERR("Bootcode is missing - not initializing link\n");
2011 return -EINVAL;
a2fbb9ea
ET
2012}
2013
c18487ee 2014static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2015{
19680c48 2016 if (!BP_NOMCP(bp)) {
4a37fb66 2017 bnx2x_acquire_phy_lock(bp);
19680c48 2018 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2019 bnx2x_release_phy_lock(bp);
a2fbb9ea 2020
19680c48
EG
2021 bnx2x_calc_fc_adv(bp);
2022 } else
 2023 BNX2X_ERR("Bootcode is missing - not setting link\n");
c18487ee 2024}
a2fbb9ea 2025
c18487ee
YR
2026static void bnx2x__link_reset(struct bnx2x *bp)
2027{
19680c48 2028 if (!BP_NOMCP(bp)) {
4a37fb66 2029 bnx2x_acquire_phy_lock(bp);
19680c48 2030 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
4a37fb66 2031 bnx2x_release_phy_lock(bp);
19680c48
EG
2032 } else
 2033 BNX2X_ERR("Bootcode is missing - not resetting link\n");
c18487ee 2034}
a2fbb9ea 2035
c18487ee
YR
2036static u8 bnx2x_link_test(struct bnx2x *bp)
2037{
2038 u8 rc;
a2fbb9ea 2039
4a37fb66 2040 bnx2x_acquire_phy_lock(bp);
c18487ee 2041 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2042 bnx2x_release_phy_lock(bp);
a2fbb9ea 2043
c18487ee
YR
2044 return rc;
2045}
a2fbb9ea 2046
34f80b04
EG
2047/* Calculates the sum of vn_min_rates.
2048 It's needed for further normalizing of the min_rates.
2049
2050 Returns:
2051 sum of vn_min_rates
2052 or
2053 0 - if all the min_rates are 0.
33471629 2054 In the latter case the fairness algorithm should be deactivated.
34f80b04
EG
2055 If not all min_rates are zero then those that are zeroes will
2056 be set to 1.
2057 */
2058static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2059{
2060 int i, port = BP_PORT(bp);
2061 u32 wsum = 0;
2062 int all_zero = 1;
2063
2064 for (i = 0; i < E1HVN_MAX; i++) {
2065 u32 vn_cfg =
2066 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2067 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2068 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2069 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2070 /* If min rate is zero - set it to 1 */
2071 if (!vn_min_rate)
2072 vn_min_rate = DEF_MIN_RATE;
2073 else
2074 all_zero = 0;
2075
2076 wsum += vn_min_rate;
2077 }
2078 }
2079
2080 /* ... only if all min rates are zeros - disable FAIRNESS */
2081 if (all_zero)
2082 return 0;
2083
2084 return wsum;
2085}
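/*
 * Worked example (editor's note; the concrete BW fields are assumed
 * for illustration): with four visible vNICs whose min BW fields are
 * {0, 2, 0, 4}, the per-vn rates become
 * {DEF_MIN_RATE, 200, DEF_MIN_RATE, 400} (each field scaled by 100),
 * so wsum = 600 + 2*DEF_MIN_RATE and fairness stays enabled.  Only
 * when every visible vn has a zero min rate does the function return
 * 0 and fairness is switched off.
 */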
2086
2087static void bnx2x_init_port_minmax(struct bnx2x *bp,
2088 int en_fness,
2089 u16 port_rate,
2090 struct cmng_struct_per_port *m_cmng_port)
2091{
2092 u32 r_param = port_rate / 8;
2093 int port = BP_PORT(bp);
2094 int i;
2095
2096 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2097
2098 /* Enable minmax only if we are in e1hmf mode */
2099 if (IS_E1HMF(bp)) {
2100 u32 fair_periodic_timeout_usec;
2101 u32 t_fair;
2102
2103 /* Enable rate shaping and fairness */
2104 m_cmng_port->flags.cmng_vn_enable = 1;
2105 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2106 m_cmng_port->flags.rate_shaping_enable = 1;
2107
2108 if (!en_fness)
 2109 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
 2110 " fairness will be disabled\n");
2111
2112 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2113 m_cmng_port->rs_vars.rs_periodic_timeout =
2114 RS_PERIODIC_TIMEOUT_USEC / 4;
2115
2116 /* this is the threshold below which no timer arming will occur
 2117 the 1.25 coefficient makes the threshold a little bigger
 2118 than the real time, to compensate for timer inaccuracy */
2119 m_cmng_port->rs_vars.rs_threshold =
2120 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2121
2122 /* resolution of fairness timer */
2123 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2124 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2125 t_fair = T_FAIR_COEF / port_rate;
2126
2127 /* this is the threshold below which we won't arm
2128 the timer anymore */
2129 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2130
2131 /* we multiply by 1e3/8 to get bytes/msec.
 2132 We don't want the credits to exceed a credit
 2133 of T_FAIR*FAIR_MEM (the algorithm resolution) */
2134 m_cmng_port->fair_vars.upper_bound =
2135 r_param * t_fair * FAIR_MEM;
2136 /* since each tick is 4 usec */
2137 m_cmng_port->fair_vars.fairness_timeout =
2138 fair_periodic_timeout_usec / 4;
2139
2140 } else {
2141 /* Disable rate shaping and fairness */
2142 m_cmng_port->flags.cmng_vn_enable = 0;
2143 m_cmng_port->flags.fairness_enable = 0;
2144 m_cmng_port->flags.rate_shaping_enable = 0;
2145
2146 DP(NETIF_MSG_IFUP,
2147 "Single function mode minmax will be disabled\n");
2148 }
2149
2150 /* Store it to internal memory */
2151 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2152 REG_WR(bp, BAR_XSTRORM_INTMEM +
2153 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2154 ((u32 *)(m_cmng_port))[i]);
2155}
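/*
 * Worked arithmetic (editor's note; values follow the code above):
 * for a 10000 Mbps port, r_param = 10000/8 = 1250, so
 * rs_threshold = RS_PERIODIC_TIMEOUT_USEC * 1250 * 5/4, and
 * t_fair = T_FAIR_COEF / 10000 -- the "for 10G it is 1000usec" case
 * mentioned above.  The fairness timer resolution then follows as
 * QM_ARB_BYTES / 1250, converted to SDM ticks by dividing by 4.
 */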
2156
2157static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2158 u32 wsum, u16 port_rate,
2159 struct cmng_struct_per_port *m_cmng_port)
2160{
2161 struct rate_shaping_vars_per_vn m_rs_vn;
2162 struct fairness_vars_per_vn m_fair_vn;
2163 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2164 u16 vn_min_rate, vn_max_rate;
2165 int i;
2166
2167 /* If function is hidden - set min and max to zeroes */
2168 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2169 vn_min_rate = 0;
2170 vn_max_rate = 0;
2171
2172 } else {
2173 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2174 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2175 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2176 if current min rate is zero - set it to 1.
33471629 2177 This is a requirement of the algorithm. */
34f80b04
EG
2178 if ((vn_min_rate == 0) && wsum)
2179 vn_min_rate = DEF_MIN_RATE;
2180 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2181 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2182 }
2183
2184 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2185 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2186
2187 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2188 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2189
2190 /* global vn counter - maximal Mbps for this vn */
2191 m_rs_vn.vn_counter.rate = vn_max_rate;
2192
2193 /* quota - number of bytes transmitted in this period */
2194 m_rs_vn.vn_counter.quota =
2195 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2196
2197#ifdef BNX2X_PER_PROT_QOS
2198 /* per protocol counter */
2199 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2200 /* maximal Mbps for this protocol */
2201 m_rs_vn.protocol_counters[protocol].rate =
2202 protocol_max_rate[protocol];
2203 /* the quota in each timer period -
2204 number of bytes transmitted in this period */
2205 m_rs_vn.protocol_counters[protocol].quota =
2206 (u32)(rs_periodic_timeout_usec *
2207 ((double)m_rs_vn.
2208 protocol_counters[protocol].rate/8));
2209 }
2210#endif
2211
2212 if (wsum) {
2213 /* credit for each period of the fairness algorithm:
 2214 number of bytes in T_FAIR (the vns share the port rate).
 2215 wsum should not be larger than 10000, thus
 2216 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2217 m_fair_vn.vn_credit_delta =
2218 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2219 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2220 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2221 m_fair_vn.vn_credit_delta);
2222 }
2223
2224#ifdef BNX2X_PER_PROT_QOS
2225 do {
2226 u32 protocolWeightSum = 0;
2227
2228 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2229 protocolWeightSum +=
2230 drvInit.protocol_min_rate[protocol];
2231 /* per protocol counter -
2232 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2233 if (protocolWeightSum > 0) {
2234 for (protocol = 0;
2235 protocol < NUM_OF_PROTOCOLS; protocol++)
2236 /* credit for each period of the
2237 fairness algorithm - number of bytes in
 2238 T_FAIR (the protocols share the vn rate) */
2239 m_fair_vn.protocol_credit_delta[protocol] =
2240 (u32)((vn_min_rate / 8) * t_fair *
2241 protocol_min_rate / protocolWeightSum);
2242 }
2243 } while (0);
2244#endif
2245
2246 /* Store it to internal memory */
2247 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2248 REG_WR(bp, BAR_XSTRORM_INTMEM +
2249 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2250 ((u32 *)(&m_rs_vn))[i]);
2251
2252 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2253 REG_WR(bp, BAR_XSTRORM_INTMEM +
2254 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2255 ((u32 *)(&m_fair_vn))[i]);
2256}
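/*
 * Worked example (editor's note; numbers assumed for illustration):
 * with wsum = 1000 and vn_min_rate = 200, the fairness credit is
 *	vn_credit_delta = max(200 * T_FAIR_COEF / 8000,
 *			      2 * fair_threshold),
 * i.e. each fairness period grants the vn a byte credit proportional
 * to its share of the port, but never less than twice the arming
 * threshold, so the timer logic above keeps making progress.
 */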
2257
c18487ee
YR
2258/* This function is called upon link interrupt */
2259static void bnx2x_link_attn(struct bnx2x *bp)
2260{
34f80b04
EG
2261 int vn;
2262
bb2a0f7a
YG
2263 /* Make sure that we are synced with the current statistics */
2264 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2265
c18487ee 2266 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2267
bb2a0f7a
YG
2268 if (bp->link_vars.link_up) {
2269
2270 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2271 struct host_port_stats *pstats;
2272
2273 pstats = bnx2x_sp(bp, port_stats);
2274 /* reset old bmac stats */
2275 memset(&(pstats->mac_stx[0]), 0,
2276 sizeof(struct mac_stx));
2277 }
2278 if ((bp->state == BNX2X_STATE_OPEN) ||
2279 (bp->state == BNX2X_STATE_DISABLED))
2280 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2281 }
2282
c18487ee
YR
2283 /* indicate link status */
2284 bnx2x_link_report(bp);
34f80b04
EG
2285
2286 if (IS_E1HMF(bp)) {
2287 int func;
2288
2289 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2290 if (vn == BP_E1HVN(bp))
2291 continue;
2292
2293 func = ((vn << 1) | BP_PORT(bp));
2294
2295 /* Set the attention towards other drivers
2296 on the same port */
2297 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2298 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2299 }
2300 }
2301
2302 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2303 struct cmng_struct_per_port m_cmng_port;
2304 u32 wsum;
2305 int port = BP_PORT(bp);
2306
2307 /* Init RATE SHAPING and FAIRNESS contexts */
2308 wsum = bnx2x_calc_vn_wsum(bp);
2309 bnx2x_init_port_minmax(bp, (int)wsum,
2310 bp->link_vars.line_speed,
2311 &m_cmng_port);
2312 if (IS_E1HMF(bp))
2313 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2314 bnx2x_init_vn_minmax(bp, 2*vn + port,
2315 wsum, bp->link_vars.line_speed,
2316 &m_cmng_port);
2317 }
c18487ee 2318}
a2fbb9ea 2319
c18487ee
YR
2320static void bnx2x__link_status_update(struct bnx2x *bp)
2321{
2322 if (bp->state != BNX2X_STATE_OPEN)
2323 return;
a2fbb9ea 2324
c18487ee 2325 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2326
bb2a0f7a
YG
2327 if (bp->link_vars.link_up)
2328 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2329 else
2330 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2331
c18487ee
YR
2332 /* indicate link status */
2333 bnx2x_link_report(bp);
a2fbb9ea 2334}
a2fbb9ea 2335
34f80b04
EG
2336static void bnx2x_pmf_update(struct bnx2x *bp)
2337{
2338 int port = BP_PORT(bp);
2339 u32 val;
2340
2341 bp->port.pmf = 1;
2342 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2343
2344 /* enable nig attention */
2345 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2346 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2347 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2348
2349 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2350}
2351
c18487ee 2352/* end of Link */
a2fbb9ea
ET
2353
2354/* slow path */
2355
2356/*
2357 * General service functions
2358 */
2359
2360/* the slow path queue is odd since completions arrive on the fastpath ring */
2361static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2362 u32 data_hi, u32 data_lo, int common)
2363{
34f80b04 2364 int func = BP_FUNC(bp);
a2fbb9ea 2365
34f80b04
EG
2366 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2367 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2368 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2369 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2370 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2371
2372#ifdef BNX2X_STOP_ON_ERROR
2373 if (unlikely(bp->panic))
2374 return -EIO;
2375#endif
2376
34f80b04 2377 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2378
2379 if (!bp->spq_left) {
2380 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2381 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2382 bnx2x_panic();
2383 return -EBUSY;
2384 }
f1410647 2385
a2fbb9ea
ET
 2386 /* CID needs the port number to be encoded in it */
2387 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2388 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2389 HW_CID(bp, cid)));
2390 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2391 if (common)
2392 bp->spq_prod_bd->hdr.type |=
2393 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2394
2395 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2396 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2397
2398 bp->spq_left--;
2399
2400 if (bp->spq_prod_bd == bp->spq_last_bd) {
2401 bp->spq_prod_bd = bp->spq;
2402 bp->spq_prod_idx = 0;
2403 DP(NETIF_MSG_TIMER, "end of spq\n");
2404
2405 } else {
2406 bp->spq_prod_bd++;
2407 bp->spq_prod_idx++;
2408 }
2409
34f80b04 2410 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2411 bp->spq_prod_idx);
2412
34f80b04 2413 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2414 return 0;
2415}
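/*
 * Illustrative usage (editor's note, not part of the original
 * source): the statistics code later in this file posts a query
 * ramrod through this helper:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 *
 * A zero return means the entry was queued and spq_left was
 * decremented; -EBUSY means the ring was full.
 */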
2416
2417/* acquire split MCP access lock register */
4a37fb66 2418static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2419{
a2fbb9ea 2420 u32 i, j, val;
34f80b04 2421 int rc = 0;
a2fbb9ea
ET
2422
2423 might_sleep();
2424 i = 100;
2425 for (j = 0; j < i*10; j++) {
2426 val = (1UL << 31);
2427 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2428 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2429 if (val & (1L << 31))
2430 break;
2431
2432 msleep(5);
2433 }
a2fbb9ea 2434 if (!(val & (1L << 31))) {
19680c48 2435 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2436 rc = -EBUSY;
2437 }
2438
2439 return rc;
2440}
2441
4a37fb66
YG
2442/* release split MCP access lock register */
2443static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2444{
2445 u32 val = 0;
2446
2447 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2448}
2449
2450static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2451{
2452 struct host_def_status_block *def_sb = bp->def_status_blk;
2453 u16 rc = 0;
2454
2455 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2456 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2457 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2458 rc |= 1;
2459 }
2460 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2461 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2462 rc |= 2;
2463 }
2464 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2465 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2466 rc |= 4;
2467 }
2468 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2469 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2470 rc |= 8;
2471 }
2472 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2473 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2474 rc |= 16;
2475 }
2476 return rc;
2477}
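/*
 * Editor's note on the return value: each bit of rc flags one
 * def-status-block index that changed -- bit 0 attention, bit 1
 * CStorm, bit 2 UStorm, bit 3 XStorm, bit 4 TStorm.  bnx2x_sp_task()
 * below consumes it, e.g. (status & 0x1) -> handle HW attentions,
 * (status & 0x2) -> CStorm events.
 */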
2478
2479/*
2480 * slow path service functions
2481 */
2482
2483static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2484{
34f80b04 2485 int port = BP_PORT(bp);
5c862848
EG
2486 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2487 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2488 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2489 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2490 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2491 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2492 u32 aeu_mask;
a2fbb9ea 2493
a2fbb9ea
ET
2494 if (bp->attn_state & asserted)
2495 BNX2X_ERR("IGU ERROR\n");
2496
3fcaf2e5
EG
2497 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2498 aeu_mask = REG_RD(bp, aeu_addr);
2499
a2fbb9ea 2500 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2501 aeu_mask, asserted);
2502 aeu_mask &= ~(asserted & 0xff);
2503 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2504
3fcaf2e5
EG
2505 REG_WR(bp, aeu_addr, aeu_mask);
2506 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2507
3fcaf2e5 2508 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2509 bp->attn_state |= asserted;
3fcaf2e5 2510 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2511
2512 if (asserted & ATTN_HARD_WIRED_MASK) {
2513 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2514
a5e9a7cf
EG
2515 bnx2x_acquire_phy_lock(bp);
2516
877e9aa4
ET
2517 /* save nig interrupt mask */
2518 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2519 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2520
c18487ee 2521 bnx2x_link_attn(bp);
a2fbb9ea
ET
2522
2523 /* handle unicore attn? */
2524 }
2525 if (asserted & ATTN_SW_TIMER_4_FUNC)
2526 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2527
2528 if (asserted & GPIO_2_FUNC)
2529 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2530
2531 if (asserted & GPIO_3_FUNC)
2532 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2533
2534 if (asserted & GPIO_4_FUNC)
2535 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2536
2537 if (port == 0) {
2538 if (asserted & ATTN_GENERAL_ATTN_1) {
2539 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2540 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2541 }
2542 if (asserted & ATTN_GENERAL_ATTN_2) {
2543 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2544 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2545 }
2546 if (asserted & ATTN_GENERAL_ATTN_3) {
2547 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2548 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2549 }
2550 } else {
2551 if (asserted & ATTN_GENERAL_ATTN_4) {
2552 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2553 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2554 }
2555 if (asserted & ATTN_GENERAL_ATTN_5) {
2556 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2557 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2558 }
2559 if (asserted & ATTN_GENERAL_ATTN_6) {
2560 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2561 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2562 }
2563 }
2564
2565 } /* if hardwired */
2566
5c862848
EG
2567 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2568 asserted, hc_addr);
2569 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2570
2571 /* now set back the mask */
a5e9a7cf 2572 if (asserted & ATTN_NIG_FOR_FUNC) {
877e9aa4 2573 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
a5e9a7cf
EG
2574 bnx2x_release_phy_lock(bp);
2575 }
a2fbb9ea
ET
2576}
2577
877e9aa4 2578static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2579{
34f80b04 2580 int port = BP_PORT(bp);
877e9aa4
ET
2581 int reg_offset;
2582 u32 val;
2583
34f80b04
EG
2584 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2585 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2586
34f80b04 2587 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2588
2589 val = REG_RD(bp, reg_offset);
2590 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2591 REG_WR(bp, reg_offset, val);
2592
2593 BNX2X_ERR("SPIO5 hw attention\n");
2594
34f80b04 2595 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 2596 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
877e9aa4
ET
2597 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2598 /* Fan failure attention */
2599
17de50b7 2600 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2601 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2602 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2603 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2604 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2605 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2606 /* mark the failure */
c18487ee 2607 bp->link_params.ext_phy_config &=
877e9aa4 2608 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2609 bp->link_params.ext_phy_config |=
877e9aa4
ET
2610 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2611 SHMEM_WR(bp,
2612 dev_info.port_hw_config[port].
2613 external_phy_config,
c18487ee 2614 bp->link_params.ext_phy_config);
877e9aa4
ET
2615 /* log the failure */
2616 printk(KERN_ERR PFX "Fan Failure on Network"
2617 " Controller %s has caused the driver to"
2618 " shutdown the card to prevent permanent"
2619 " damage. Please contact Dell Support for"
2620 " assistance\n", bp->dev->name);
2621 break;
2622
2623 default:
2624 break;
2625 }
2626 }
34f80b04
EG
2627
2628 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2629
2630 val = REG_RD(bp, reg_offset);
2631 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2632 REG_WR(bp, reg_offset, val);
2633
2634 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2635 (attn & HW_INTERRUT_ASSERT_SET_0));
2636 bnx2x_panic();
2637 }
877e9aa4
ET
2638}
2639
2640static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2641{
2642 u32 val;
2643
2644 if (attn & BNX2X_DOORQ_ASSERT) {
2645
2646 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2647 BNX2X_ERR("DB hw attention 0x%x\n", val);
2648 /* DORQ discard attention */
2649 if (val & 0x2)
2650 BNX2X_ERR("FATAL error from DORQ\n");
2651 }
34f80b04
EG
2652
2653 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2654
2655 int port = BP_PORT(bp);
2656 int reg_offset;
2657
2658 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2659 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2660
2661 val = REG_RD(bp, reg_offset);
2662 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2663 REG_WR(bp, reg_offset, val);
2664
2665 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2666 (attn & HW_INTERRUT_ASSERT_SET_1));
2667 bnx2x_panic();
2668 }
877e9aa4
ET
2669}
2670
2671static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2672{
2673 u32 val;
2674
2675 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2676
2677 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2678 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2679 /* CFC error attention */
2680 if (val & 0x2)
2681 BNX2X_ERR("FATAL error from CFC\n");
2682 }
2683
2684 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2685
2686 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2687 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2688 /* RQ_USDMDP_FIFO_OVERFLOW */
2689 if (val & 0x18000)
2690 BNX2X_ERR("FATAL error from PXP\n");
2691 }
34f80b04
EG
2692
2693 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2694
2695 int port = BP_PORT(bp);
2696 int reg_offset;
2697
2698 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2699 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2700
2701 val = REG_RD(bp, reg_offset);
2702 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2703 REG_WR(bp, reg_offset, val);
2704
2705 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2706 (attn & HW_INTERRUT_ASSERT_SET_2));
2707 bnx2x_panic();
2708 }
877e9aa4
ET
2709}
2710
2711static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2712{
34f80b04
EG
2713 u32 val;
2714
877e9aa4
ET
2715 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2716
34f80b04
EG
2717 if (attn & BNX2X_PMF_LINK_ASSERT) {
2718 int func = BP_FUNC(bp);
2719
2720 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2721 bnx2x__link_status_update(bp);
2722 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2723 DRV_STATUS_PMF)
2724 bnx2x_pmf_update(bp);
2725
2726 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2727
2728 BNX2X_ERR("MC assert!\n");
2729 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2730 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2731 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2732 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2733 bnx2x_panic();
2734
2735 } else if (attn & BNX2X_MCP_ASSERT) {
2736
2737 BNX2X_ERR("MCP assert!\n");
2738 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2739 bnx2x_fw_dump(bp);
877e9aa4
ET
2740
2741 } else
2742 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2743 }
2744
2745 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2746 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2747 if (attn & BNX2X_GRC_TIMEOUT) {
2748 val = CHIP_IS_E1H(bp) ?
2749 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2750 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2751 }
2752 if (attn & BNX2X_GRC_RSV) {
2753 val = CHIP_IS_E1H(bp) ?
2754 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2755 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2756 }
877e9aa4 2757 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2758 }
2759}
2760
2761static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2762{
a2fbb9ea
ET
2763 struct attn_route attn;
2764 struct attn_route group_mask;
34f80b04 2765 int port = BP_PORT(bp);
877e9aa4 2766 int index;
a2fbb9ea
ET
2767 u32 reg_addr;
2768 u32 val;
3fcaf2e5 2769 u32 aeu_mask;
a2fbb9ea
ET
2770
2771 /* need to take HW lock because MCP or other port might also
2772 try to handle this event */
4a37fb66 2773 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2774
2775 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2776 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2777 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2778 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2779 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2780 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2781
2782 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2783 if (deasserted & (1 << index)) {
2784 group_mask = bp->attn_group[index];
2785
34f80b04
EG
2786 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2787 index, group_mask.sig[0], group_mask.sig[1],
2788 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2789
877e9aa4
ET
2790 bnx2x_attn_int_deasserted3(bp,
2791 attn.sig[3] & group_mask.sig[3]);
2792 bnx2x_attn_int_deasserted1(bp,
2793 attn.sig[1] & group_mask.sig[1]);
2794 bnx2x_attn_int_deasserted2(bp,
2795 attn.sig[2] & group_mask.sig[2]);
2796 bnx2x_attn_int_deasserted0(bp,
2797 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2798
a2fbb9ea
ET
2799 if ((attn.sig[0] & group_mask.sig[0] &
2800 HW_PRTY_ASSERT_SET_0) ||
2801 (attn.sig[1] & group_mask.sig[1] &
2802 HW_PRTY_ASSERT_SET_1) ||
2803 (attn.sig[2] & group_mask.sig[2] &
2804 HW_PRTY_ASSERT_SET_2))
6378c025 2805 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2806 }
2807 }
2808
4a37fb66 2809 bnx2x_release_alr(bp);
a2fbb9ea 2810
5c862848 2811 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2812
2813 val = ~deasserted;
3fcaf2e5
EG
2814 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2815 val, reg_addr);
5c862848 2816 REG_WR(bp, reg_addr, val);
a2fbb9ea 2817
a2fbb9ea 2818 if (~bp->attn_state & deasserted)
3fcaf2e5 2819 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2820
2821 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2822 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2823
3fcaf2e5
EG
2824 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2825 aeu_mask = REG_RD(bp, reg_addr);
2826
2827 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2828 aeu_mask, deasserted);
2829 aeu_mask |= (deasserted & 0xff);
2830 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2831
3fcaf2e5
EG
2832 REG_WR(bp, reg_addr, aeu_mask);
2833 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2834
2835 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2836 bp->attn_state &= ~deasserted;
2837 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2838}
2839
2840static void bnx2x_attn_int(struct bnx2x *bp)
2841{
2842 /* read local copy of bits */
68d59484
EG
2843 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2844 attn_bits);
2845 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2846 attn_bits_ack);
a2fbb9ea
ET
2847 u32 attn_state = bp->attn_state;
2848
2849 /* look for changed bits */
2850 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2851 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2852
2853 DP(NETIF_MSG_HW,
2854 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2855 attn_bits, attn_ack, asserted, deasserted);
2856
2857 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2858 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2859
2860 /* handle bits that were raised */
2861 if (asserted)
2862 bnx2x_attn_int_asserted(bp, asserted);
2863
2864 if (deasserted)
2865 bnx2x_attn_int_deasserted(bp, deasserted);
2866}
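/*
 * Worked bit example (editor's note; values assumed): with
 * attn_bits = 0x5, attn_ack = 0x1 and attn_state = 0x1,
 *	asserted   = 0x5 & ~0x1 & ~0x1 = 0x4
 *	deasserted = ~0x5 & 0x1 & 0x1 = 0x0
 * so only bit 2 is newly raised and gets routed to
 * bnx2x_attn_int_asserted().
 */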
2867
2868static void bnx2x_sp_task(struct work_struct *work)
2869{
1cf167f2 2870 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2871 u16 status;
2872
34f80b04 2873
a2fbb9ea
ET
2874 /* Return here if interrupt is disabled */
2875 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2876 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2877 return;
2878 }
2879
2880 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2881/* if (status == 0) */
2882/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2883
3196a88a 2884 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2885
877e9aa4
ET
2886 /* HW attentions */
2887 if (status & 0x1)
a2fbb9ea 2888 bnx2x_attn_int(bp);
a2fbb9ea 2889
bb2a0f7a
YG
2890 /* CStorm events: query_stats, port delete ramrod */
2891 if (status & 0x2)
2892 bp->stats_pending = 0;
2893
68d59484 2894 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2895 IGU_INT_NOP, 1);
2896 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2897 IGU_INT_NOP, 1);
2898 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2899 IGU_INT_NOP, 1);
2900 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2901 IGU_INT_NOP, 1);
2902 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2903 IGU_INT_ENABLE, 1);
877e9aa4 2904
a2fbb9ea
ET
2905}
2906
2907static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2908{
2909 struct net_device *dev = dev_instance;
2910 struct bnx2x *bp = netdev_priv(dev);
2911
2912 /* Return here if interrupt is disabled */
2913 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2914 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2915 return IRQ_HANDLED;
2916 }
2917
8d9c5f34 2918 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2919
2920#ifdef BNX2X_STOP_ON_ERROR
2921 if (unlikely(bp->panic))
2922 return IRQ_HANDLED;
2923#endif
2924
1cf167f2 2925 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2926
2927 return IRQ_HANDLED;
2928}
2929
2930/* end of slow path */
2931
2932/* Statistics */
2933
2934/****************************************************************************
2935* Macros
2936****************************************************************************/
2937
a2fbb9ea
ET
2938/* sum[hi:lo] += add[hi:lo] */
2939#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2940 do { \
2941 s_lo += a_lo; \
f5ba6772 2942 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
2943 } while (0)
2944
2945/* difference = minuend - subtrahend */
2946#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2947 do { \
bb2a0f7a
YG
2948 if (m_lo < s_lo) { \
2949 /* underflow */ \
a2fbb9ea 2950 d_hi = m_hi - s_hi; \
bb2a0f7a 2951 if (d_hi > 0) { \
6378c025 2952 /* we can 'loan' 1 */ \
a2fbb9ea
ET
2953 d_hi--; \
2954 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2955 } else { \
6378c025 2956 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2957 d_hi = 0; \
2958 d_lo = 0; \
2959 } \
bb2a0f7a
YG
2960 } else { \
2961 /* m_lo >= s_lo */ \
a2fbb9ea 2962 if (m_hi < s_hi) { \
bb2a0f7a
YG
2963 d_hi = 0; \
2964 d_lo = 0; \
2965 } else { \
6378c025 2966 /* m_hi >= s_hi */ \
bb2a0f7a
YG
2967 d_hi = m_hi - s_hi; \
2968 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2969 } \
2970 } \
2971 } while (0)
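/*
 * Worked example (editor's note; values assumed): the macros carry
 * and borrow across the 32-bit halves by hand.  With
 * s = {hi 0x1, lo 0xFFFFFFFF} and a = {hi 0x0, lo 0x2}, ADD_64
 * wraps s_lo to 0x1 and, since s_lo < a_lo, bumps s_hi to 0x2 --
 * i.e. 0x1FFFFFFFF + 2 = 0x200000001.  DIFF_64 of those two values
 * takes the underflow branch ('loan' 1 from the high word) and
 * yields {hi 0x0, lo 0x2}.
 */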
2972
bb2a0f7a 2973#define UPDATE_STAT64(s, t) \
a2fbb9ea 2974 do { \
bb2a0f7a
YG
2975 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2976 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2977 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2978 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2979 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2980 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2981 } while (0)
2982
bb2a0f7a 2983#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2984 do { \
bb2a0f7a
YG
2985 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2986 diff.lo, new->s##_lo, old->s##_lo); \
2987 ADD_64(estats->t##_hi, diff.hi, \
2988 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2989 } while (0)
2990
2991/* sum[hi:lo] += add */
2992#define ADD_EXTEND_64(s_hi, s_lo, a) \
2993 do { \
2994 s_lo += a; \
2995 s_hi += (s_lo < a) ? 1 : 0; \
2996 } while (0)
2997
bb2a0f7a 2998#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2999 do { \
bb2a0f7a
YG
3000 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3001 pstats->mac_stx[1].s##_lo, \
3002 new->s); \
a2fbb9ea
ET
3003 } while (0)
3004
bb2a0f7a 3005#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
3006 do { \
3007 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3008 old_tclient->s = le32_to_cpu(tclient->s); \
bb2a0f7a
YG
3009 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3010 } while (0)
3011
3012#define UPDATE_EXTEND_XSTAT(s, t) \
3013 do { \
3014 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3015 old_xclient->s = le32_to_cpu(xclient->s); \
3016 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
a2fbb9ea
ET
3017 } while (0)
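/*
 * Editor's note: UPDATE_EXTEND_TSTAT/XSTAT compute the delta in
 * plain u32 arithmetic, so a wrapped 32-bit HW counter still yields
 * the right increment.  Example (values assumed): old = 0xFFFFFFF0,
 * new = 0x00000010 gives diff = 0x10 - 0xFFFFFFF0 = 0x20, which is
 * then folded into the 64-bit accumulator via ADD_EXTEND_64.
 */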
3018
3019/*
3020 * General service functions
3021 */
3022
3023static inline long bnx2x_hilo(u32 *hiref)
3024{
3025 u32 lo = *(hiref + 1);
3026#if (BITS_PER_LONG == 64)
3027 u32 hi = *hiref;
3028
3029 return HILO_U64(hi, lo);
3030#else
3031 return lo;
3032#endif
3033}
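/*
 * Editor's note: on 64-bit builds bnx2x_hilo() combines both words
 * into one value (e.g. hi = 0x1, lo = 0x2 -> 0x100000002); on 32-bit
 * builds only the low word is returned, since 'long' cannot hold the
 * full 64-bit counter.
 */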
3034
3035/*
3036 * Init service functions
3037 */
3038
bb2a0f7a
YG
3039static void bnx2x_storm_stats_post(struct bnx2x *bp)
3040{
3041 if (!bp->stats_pending) {
3042 struct eth_query_ramrod_data ramrod_data = {0};
3043 int rc;
3044
3045 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3046 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
bb2a0f7a
YG
3047 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3048
3049 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3050 ((u32 *)&ramrod_data)[1],
3051 ((u32 *)&ramrod_data)[0], 0);
3052 if (rc == 0) {
 3053 /* stats ramrod has its own slot on the spq */
3054 bp->spq_left++;
3055 bp->stats_pending = 1;
3056 }
3057 }
3058}
3059
3060static void bnx2x_stats_init(struct bnx2x *bp)
3061{
3062 int port = BP_PORT(bp);
3063
3064 bp->executer_idx = 0;
3065 bp->stats_counter = 0;
3066
3067 /* port stats */
3068 if (!BP_NOMCP(bp))
3069 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3070 else
3071 bp->port.port_stx = 0;
3072 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3073
3074 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3075 bp->port.old_nig_stats.brb_discard =
3076 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3077 bp->port.old_nig_stats.brb_truncate =
3078 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3079 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3080 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3081 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3082 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3083
3084 /* function stats */
3085 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3086 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3087 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3088 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3089
3090 bp->stats_state = STATS_STATE_DISABLED;
3091 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3092 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3093}
3094
3095static void bnx2x_hw_stats_post(struct bnx2x *bp)
3096{
3097 struct dmae_command *dmae = &bp->stats_dmae;
3098 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3099
3100 *stats_comp = DMAE_COMP_VAL;
3101
3102 /* loader */
3103 if (bp->executer_idx) {
3104 int loader_idx = PMF_DMAE_C(bp);
3105
3106 memset(dmae, 0, sizeof(struct dmae_command));
3107
3108 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3109 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3110 DMAE_CMD_DST_RESET |
3111#ifdef __BIG_ENDIAN
3112 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3113#else
3114 DMAE_CMD_ENDIANITY_DW_SWAP |
3115#endif
3116 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3117 DMAE_CMD_PORT_0) |
3118 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3119 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3120 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3121 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3122 sizeof(struct dmae_command) *
3123 (loader_idx + 1)) >> 2;
3124 dmae->dst_addr_hi = 0;
3125 dmae->len = sizeof(struct dmae_command) >> 2;
3126 if (CHIP_IS_E1(bp))
3127 dmae->len--;
3128 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3129 dmae->comp_addr_hi = 0;
3130 dmae->comp_val = 1;
3131
3132 *stats_comp = 0;
3133 bnx2x_post_dmae(bp, dmae, loader_idx);
3134
3135 } else if (bp->func_stx) {
3136 *stats_comp = 0;
3137 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3138 }
3139}
3140
3141static int bnx2x_stats_comp(struct bnx2x *bp)
3142{
3143 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3144 int cnt = 10;
3145
3146 might_sleep();
3147 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3148 if (!cnt) {
 3149 BNX2X_ERR("timeout waiting for stats to finish\n");
3150 break;
3151 }
3152 cnt--;
12469401 3153 msleep(1);
bb2a0f7a
YG
3154 }
3155 return 1;
3156}
3157
3158/*
3159 * Statistics service functions
3160 */
3161
3162static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3163{
3164 struct dmae_command *dmae;
3165 u32 opcode;
3166 int loader_idx = PMF_DMAE_C(bp);
3167 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3168
3169 /* sanity */
3170 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3171 BNX2X_ERR("BUG!\n");
3172 return;
3173 }
3174
3175 bp->executer_idx = 0;
3176
3177 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3178 DMAE_CMD_C_ENABLE |
3179 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3180#ifdef __BIG_ENDIAN
3181 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3182#else
3183 DMAE_CMD_ENDIANITY_DW_SWAP |
3184#endif
3185 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3186 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3187
3188 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3189 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3190 dmae->src_addr_lo = bp->port.port_stx >> 2;
3191 dmae->src_addr_hi = 0;
3192 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3193 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3194 dmae->len = DMAE_LEN32_RD_MAX;
3195 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3196 dmae->comp_addr_hi = 0;
3197 dmae->comp_val = 1;
3198
3199 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3200 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3201 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3202 dmae->src_addr_hi = 0;
7a9b2557
VZ
3203 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3204 DMAE_LEN32_RD_MAX * 4);
3205 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3206 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3207 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3208 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3209 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3210 dmae->comp_val = DMAE_COMP_VAL;
3211
3212 *stats_comp = 0;
3213 bnx2x_hw_stats_post(bp);
3214 bnx2x_stats_comp(bp);
3215}
3216
3217static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3218{
3219 struct dmae_command *dmae;
34f80b04 3220 int port = BP_PORT(bp);
bb2a0f7a 3221 int vn = BP_E1HVN(bp);
a2fbb9ea 3222 u32 opcode;
bb2a0f7a 3223 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3224 u32 mac_addr;
bb2a0f7a
YG
3225 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3226
3227 /* sanity */
3228 if (!bp->link_vars.link_up || !bp->port.pmf) {
3229 BNX2X_ERR("BUG!\n");
3230 return;
3231 }
a2fbb9ea
ET
3232
3233 bp->executer_idx = 0;
bb2a0f7a
YG
3234
3235 /* MCP */
3236 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3237 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3238 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3239#ifdef __BIG_ENDIAN
bb2a0f7a 3240 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3241#else
bb2a0f7a 3242 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3243#endif
bb2a0f7a
YG
3244 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3245 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3246
bb2a0f7a 3247 if (bp->port.port_stx) {
a2fbb9ea
ET
3248
3249 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3250 dmae->opcode = opcode;
bb2a0f7a
YG
3251 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3252 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3253 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3254 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3255 dmae->len = sizeof(struct host_port_stats) >> 2;
3256 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3257 dmae->comp_addr_hi = 0;
3258 dmae->comp_val = 1;
a2fbb9ea
ET
3259 }
3260
bb2a0f7a
YG
3261 if (bp->func_stx) {
3262
3263 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3264 dmae->opcode = opcode;
3265 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3266 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3267 dmae->dst_addr_lo = bp->func_stx >> 2;
3268 dmae->dst_addr_hi = 0;
3269 dmae->len = sizeof(struct host_func_stats) >> 2;
3270 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3271 dmae->comp_addr_hi = 0;
3272 dmae->comp_val = 1;
a2fbb9ea
ET
3273 }
3274
bb2a0f7a 3275 /* MAC */
a2fbb9ea
ET
3276 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3277 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3278 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3279#ifdef __BIG_ENDIAN
3280 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3281#else
3282 DMAE_CMD_ENDIANITY_DW_SWAP |
3283#endif
bb2a0f7a
YG
3284 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3285 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3286
c18487ee 3287 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3288
3289 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3290 NIG_REG_INGRESS_BMAC0_MEM);
3291
3292 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3293 BIGMAC_REGISTER_TX_STAT_GTBYT */
3294 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3295 dmae->opcode = opcode;
3296 dmae->src_addr_lo = (mac_addr +
3297 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3298 dmae->src_addr_hi = 0;
3299 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3300 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3301 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3302 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3303 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3304 dmae->comp_addr_hi = 0;
3305 dmae->comp_val = 1;
3306
3307 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3308 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3309 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3310 dmae->opcode = opcode;
3311 dmae->src_addr_lo = (mac_addr +
3312 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3313 dmae->src_addr_hi = 0;
3314 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3315 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3316 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3317 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3318 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3319 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3320 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3321 dmae->comp_addr_hi = 0;
3322 dmae->comp_val = 1;
3323
c18487ee 3324 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3325
3326 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3327
3328 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3329 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3330 dmae->opcode = opcode;
3331 dmae->src_addr_lo = (mac_addr +
3332 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3333 dmae->src_addr_hi = 0;
3334 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3335 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3336 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3337 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3338 dmae->comp_addr_hi = 0;
3339 dmae->comp_val = 1;
3340
3341 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3342 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3343 dmae->opcode = opcode;
3344 dmae->src_addr_lo = (mac_addr +
3345 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3346 dmae->src_addr_hi = 0;
3347 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3348 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3349 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3350 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3351 dmae->len = 1;
3352 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3353 dmae->comp_addr_hi = 0;
3354 dmae->comp_val = 1;
3355
3356 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3357 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3358 dmae->opcode = opcode;
3359 dmae->src_addr_lo = (mac_addr +
3360 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3361 dmae->src_addr_hi = 0;
3362 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3363 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3364 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3365 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3366 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3367 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3368 dmae->comp_addr_hi = 0;
3369 dmae->comp_val = 1;
3370 }
3371
3372 /* NIG */
bb2a0f7a
YG
3373 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3374 dmae->opcode = opcode;
3375 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3376 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3377 dmae->src_addr_hi = 0;
3378 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3379 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3380 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3381 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3382 dmae->comp_addr_hi = 0;
3383 dmae->comp_val = 1;
3384
3385 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3386 dmae->opcode = opcode;
3387 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3388 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3389 dmae->src_addr_hi = 0;
3390 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3391 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3392 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3393 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3394 dmae->len = (2*sizeof(u32)) >> 2;
3395 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3396 dmae->comp_addr_hi = 0;
3397 dmae->comp_val = 1;
3398
a2fbb9ea
ET
3399 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3400 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3401 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3402 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3403#ifdef __BIG_ENDIAN
3404 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3405#else
3406 DMAE_CMD_ENDIANITY_DW_SWAP |
3407#endif
bb2a0f7a
YG
3408 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3409 (vn << DMAE_CMD_E1HVN_SHIFT));
3410 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3411 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3412 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3413 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3414 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3415 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3416 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3417 dmae->len = (2*sizeof(u32)) >> 2;
3418 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3419 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3420 dmae->comp_val = DMAE_COMP_VAL;
3421
3422 *stats_comp = 0;
a2fbb9ea
ET
3423}
3424
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

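/* The UPDATE_STAT64/UPDATE_EXTEND_STAT macros used below pick up the local
 * variables 'new', 'pstats'/'estats' and 'diff' by name: they compute the
 * delta of each (narrower, wrapping) MAC hardware counter and accumulate it
 * into the 64-bit hi/lo pairs kept in host memory. That is why 'diff' is
 * declared but never referenced explicitly.
 */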
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct regpair diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	return 0;
}

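/* Storm statistics are snapshots written into fw_stats by the TSTORM/XSTORM
 * firmware in response to a query posted by bnx2x_storm_stats_post(). Each
 * snapshot carries a counter; it is consumed only when that counter is
 * exactly one behind bp->stats_counter, i.e. the firmware has answered the
 * most recent query.
 */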
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	int cl_id = BP_CL_ID(bp);
	struct tstorm_per_port_stats *tport =
				&stats->tstorm_common.port_statistics;
	struct tstorm_per_client_stats *tclient =
			&stats->tstorm_common.client_statistics[cl_id];
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct xstorm_per_client_stats *xclient =
			&stats->xstorm_common.client_statistics[cl_id];
	struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	u32 diff;

	/* are storm stats valid? */
	if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
		   " tstorm counter (%d) != stats_counter (%d)\n",
		   tclient->stats_counter, bp->stats_counter);
		return -1;
	}
	if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
		   " xstorm counter (%d) != stats_counter (%d)\n",
		   xclient->stats_counter, bp->stats_counter);
		return -2;
	}

	fstats->total_bytes_received_hi =
	fstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
	fstats->total_bytes_received_lo =
	fstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

	estats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
	estats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(fstats->total_bytes_received_hi,
	       estats->error_bytes_received_hi,
	       fstats->total_bytes_received_lo,
	       estats->error_bytes_received_lo);

	UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);

	fstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
	fstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

	UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
	estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
	estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
	estats->mac_discard = le32_to_cpu(tport->mac_discard);

	old_tclient->rcv_unicast_bytes.hi =
				le32_to_cpu(tclient->rcv_unicast_bytes.hi);
	old_tclient->rcv_unicast_bytes.lo =
				le32_to_cpu(tclient->rcv_unicast_bytes.lo);
	old_tclient->rcv_broadcast_bytes.hi =
				le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
	old_tclient->rcv_broadcast_bytes.lo =
				le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
	old_tclient->rcv_multicast_bytes.hi =
				le32_to_cpu(tclient->rcv_multicast_bytes.hi);
	old_tclient->rcv_multicast_bytes.lo =
				le32_to_cpu(tclient->rcv_multicast_bytes.lo);
	old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);

	old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
	old_tclient->packets_too_big_discard =
				le32_to_cpu(tclient->packets_too_big_discard);
	estats->no_buff_discard =
	old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
	old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);

	old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
	old_xclient->unicast_bytes_sent.hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
	old_xclient->unicast_bytes_sent.lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);
	old_xclient->multicast_bytes_sent.hi =
				le32_to_cpu(xclient->multicast_bytes_sent.hi);
	old_xclient->multicast_bytes_sent.lo =
				le32_to_cpu(xclient->multicast_bytes_sent.lo);
	old_xclient->broadcast_bytes_sent.hi =
				le32_to_cpu(xclient->broadcast_bytes_sent.hi);
	old_xclient->broadcast_bytes_sent.lo =
				le32_to_cpu(xclient->broadcast_bytes_sent.lo);

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	return 0;
}

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = old_tclient->checksum_discard +
			     estats->mac_discard;
	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);

	nstats->collisions =
			estats->tx_stat_dot3statssinglecollisionframes_lo +
			estats->tx_stat_dot3statsmultiplecollisionframes_lo +
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;

	estats->jabber_packets_received =
				old_tclient->packets_too_big_discard +
				estats->rx_stat_dot3statsframestoolong_lo;

	nstats->rx_length_errors =
				estats->rx_stat_etherstatsundersizepkts_lo +
				estats->jabber_packets_received;
	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;
	nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors;
}

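/* Runs from the timer via the statistics state machine: consumes the DMAE
 * and storm snapshots when complete, folds them into the netdev stats, and
 * immediately re-posts both queries so a fresh snapshot is in flight for
 * the next tick. Three consecutive misses are treated as a firmware
 * failure.
 */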
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int update = 0;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update)
		bnx2x_net_stats_update(bp);

	else {
		if (bp->stats_pending) {
			bp->stats_pending++;
			if (bp->stats_pending == 3) {
				BNX2X_ERR("stats not updated for 3 times\n");
				bnx2x_panic();
				return;
			}
		}
	}

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
		       estats->driver_xoff, estats->brb_drop_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %u  no_buff_discard %u  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxoverflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       old_tclient->checksum_discard,
		       old_tclient->packets_too_big_discard,
		       old_tclient->no_buff_discard, estats->mac_discard,
		       estats->mac_filter_discard, estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       old_tclient->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

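/* Statistics state machine: rows are the current state (DISABLED/ENABLED),
 * columns are the event (PMF change, link up, timer update, stop); each
 * entry names the action to run and the state to move to.
 */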
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

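/* Periodic driver timer: drives the optional "poll" debug mode, exchanges
 * the driver/MCP heartbeat pulse through shared memory, and feeds
 * STATS_EVENT_UPDATE into the statistics state machine.
 */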
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

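/* Publish a fastpath status block to the USTORM/CSTORM: each storm gets the
 * DMA address of its section and the owning function, and all HC indices
 * start out disabled; bnx2x_update_coalesce() later enables the Rx/Tx CQ
 * indices with the configured timeouts.
 */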
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

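/* The default status block carries the attention bits and the slowpath
 * indices of all four storms; besides publishing the section addresses,
 * the init routine also caches the per-group attention signals from the
 * AEU and registers the block with the HC attention logic.
 */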
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

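/* Rx init builds up to three rings per queue: the BD ring of receive
 * buffers, the CQE completion ring, and (with TPA) the SGE ring that holds
 * pages for aggregated packets. The final descriptors of each ring page
 * are reserved as "next page" pointers chaining the pages into a circle.
 */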
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu;
	bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
		BCM_RX_ETH_PAYLOAD_ALIGN;

	if (bp->flags & TPA_ENABLE_FLAG) {
		DP(NETIF_MSG_IFUP,
		   "rx_buf_size %d  effective_mtu %d\n",
		   bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				bp->eth_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

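/* The slowpath queue (SPQ) is a single BD ring through which ramrods
 * (slowpath commands) are submitted to the firmware; its page base and
 * initial producer index are published to the XSTORM.
 */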
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_log_size =
						6 /*BCM_RX_ETH_PAYLOAD_ALIGN*/;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

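/* RSS indirection table: each of the TSTORM_INDIRECTION_TABLE_SIZE hash
 * buckets is mapped round-robin onto the client IDs of the enabled queues.
 */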
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			BP_CL_ID(bp) + (i % bp->num_queues));
}

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

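/* Rx mode is expressed to the TSTORM as per-function bits in the
 * accept/drop-all masks for unicast, multicast and broadcast; NONE drops
 * everything, NORMAL relies on the MAC/MC filters and only forces
 * broadcast accept, PROMISC accepts all three classes.
 */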
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}
	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* reset xstorm per client statistics */
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}
	/* reset tstorm per client statistics */
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      FP_SB_ID(fp));
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

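/* bnx2x_gunzip() checks the gzip magic (0x1f 0x8b) and deflate method,
 * skips the optional NUL-terminated file name if the FNAME flag is set,
 * and then runs a raw inflate (negative windowBits) into the preallocated
 * gunzip buffer.
 */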
static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

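/* Each REG_WR_DMAE below pushes two data words plus a control word
 * (carrying the SOP/EOP markers) into the NIG debug packet interface, so
 * the two writes emit one 16-byte (0x10) frame; this is the size the
 * memory test below watches for in the BRB octet counter.
 */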
/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

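/* The test starves the parser of CFC search credits so that loopback
 * packets queue up inside the BRB/PRS path, then checks the NIG and PRS
 * packet counters at each step; any mismatch means one of the internal
 * memories is bad.
 */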
4967/* some of the internal memories
4968 * are not directly readable from the driver
4969 * to test them we send debug packets
4970 */
4971static int bnx2x_int_mem_test(struct bnx2x *bp)
4972{
4973 int factor;
4974 int count, i;
4975 u32 val = 0;
4976
ad8d3948 4977 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4978 factor = 120;
ad8d3948
EG
4979 else if (CHIP_REV_IS_EMUL(bp))
4980 factor = 200;
4981 else
a2fbb9ea 4982 factor = 1;
a2fbb9ea
ET
4983
4984 DP(NETIF_MSG_HW, "start part1\n");
4985
4986 /* Disable inputs of parser neighbor blocks */
4987 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4988 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4989 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4990 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4991
4992 /* Write 0 to parser credits for CFC search request */
4993 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4994
4995 /* send Ethernet packet */
4996 bnx2x_lb_pckt(bp);
4997
4998 /* TODO do i reset NIG statistic? */
4999 /* Wait until NIG register shows 1 packet of size 0x10 */
5000 count = 1000 * factor;
5001 while (count) {
34f80b04 5002
a2fbb9ea
ET
5003 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5004 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5005 if (val == 0x10)
5006 break;
5007
5008 msleep(10);
5009 count--;
5010 }
5011 if (val != 0x10) {
5012 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5013 return -1;
5014 }
5015
5016 /* Wait until PRS register shows 1 packet */
5017 count = 1000 * factor;
5018 while (count) {
5019 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5020 if (val == 1)
5021 break;
5022
5023 msleep(10);
5024 count--;
5025 }
5026 if (val != 0x1) {
5027 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5028 return -2;
5029 }
5030
5031 /* Reset and init BRB, PRS */
34f80b04 5032 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5033 msleep(50);
34f80b04 5034 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
5035 msleep(50);
5036 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5037 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5038
5039 DP(NETIF_MSG_HW, "part2\n");
5040
5041 /* Disable inputs of parser neighbor blocks */
5042 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5043 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5044 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5045 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5046
5047 /* Write 0 to parser credits for CFC search request */
5048 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5049
5050 /* send 10 Ethernet packets */
5051 for (i = 0; i < 10; i++)
5052 bnx2x_lb_pckt(bp);
5053
5054 /* Wait until NIG register shows 10 + 1
5055 packets of size 11*0x10 = 0xb0 */
5056 count = 1000 * factor;
5057 while (count) {
34f80b04 5058
a2fbb9ea
ET
5059 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5060 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5061 if (val == 0xb0)
5062 break;
5063
5064 msleep(10);
5065 count--;
5066 }
5067 if (val != 0xb0) {
5068 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5069 return -3;
5070 }
5071
5072 /* Wait until PRS register shows 2 packets */
5073 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5074 if (val != 2)
5075 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5076
5077 /* Write 1 to parser credits for CFC search request */
5078 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5079
5080 /* Wait until PRS register shows 3 packets */
5081 msleep(10 * factor);
5082 /* Wait until NIG register shows 1 packet of size 0x10 */
5083 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5084 if (val != 3)
5085 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5086
5087 /* clear NIG EOP FIFO */
5088 for (i = 0; i < 11; i++)
5089 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5090 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5091 if (val != 1) {
5092 BNX2X_ERR("clear of NIG failed\n");
5093 return -4;
5094 }
5095
5096 /* Reset and init BRB, PRS, NIG */
5097 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5098 msleep(50);
5099 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5100 msleep(50);
5101 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5102 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5103#ifndef BCM_ISCSI
5104 /* set NIC mode */
5105 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5106#endif
5107
5108 /* Enable inputs of parser neighbor blocks */
5109 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5110 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5111 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5112 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5113
5114 DP(NETIF_MSG_HW, "done\n");
5115
5116 return 0; /* OK */
5117}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bits 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	if (CHIP_REV_IS_SLOW(bp)) {
		/* fix for emulation and FPGA for no pause */
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
	}

	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
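	/* SRC_REG_KEYRSS{0,1}_* appear to be the searcher's RSS hash key
	 * registers; 0xc0cac01a is just a placeholder pattern written to
	 * every key word (hence the TODO above).
	 */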
	bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

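	/* The self test below resets BRB/PRS and sends loopback traffic,
	 * so it is run only on E1 and only when the NIG octet counter
	 * reads zero, i.e. on the first load since power-up.
	 */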
	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
			MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */
	bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
			 (port ? XCM_PORT1_END : XCM_PORT0_END));

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			     func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */
	/* Port BRB1 comes here */
	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			     port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			     port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			     port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			     port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			     port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

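	/* Arithmetic note (assumption: PBF thresholds/credits are counted
	 * in 16-byte units, hence the /16): 9040 covers a 9000-byte MTU
	 * plus header overhead, and the "+ 553 - 22" credit adjustment is
	 * a hardware-specific constant whose derivation is not documented
	 * here.
	 */
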
	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			     port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			     port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *            bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			     port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		u32 wsum;
		struct cmng_struct_per_port m_cmng_port;
		int vn;

		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* Init RATE SHAPING and FAIRNESS contexts.
		   Initialize as if there is 10G link. */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, 10000, &m_cmng_port);
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has a valid bit
   added as the 53rd bit;
   then, since this is a wide register(TM),
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

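/* Worked example (illustrative): for a DMA address of 0x0012345678000,
 * ONCHIP_ADDR1() yields 0x12345678 (address bits 12-43) and
 * ONCHIP_ADDR2() yields 0x100000 (the valid bit ORed with address bits
 * 44-51, which are zero here).
 */
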
#define CNIC_ILT_LINES		0

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));


	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

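	/* Note the deliberate fall-through below: a COMMON load also runs
	 * the PORT and FUNCTION stages, and a PORT load also runs the
	 * FUNCTION stage.
	 */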
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	} else
		bp->func_stx = 0;

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

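	/* Poll for the firmware to echo our sequence number back: the low
	 * bits of both mailboxes carry (command | seq), so a matching seq
	 * in fw_mb_header means the reply belongs to this command.
	 */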
	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {

		/* Status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

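	/* Any allocation failure jumps to alloc_mem_err, which calls
	 * bnx2x_free_mem(); since the free macros skip NULL pointers,
	 * a partially completed allocation is torn down safely.
	 */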
	int i;

	/* fastpath */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* Status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fix up the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
			BNX2X_ERR("IRQ of fp #%d being freed while "
				  "state != closed\n", i);

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset;

	bp->msix_table[0].entry = 0;
	offset = 1;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");

	for_each_queue(bp, i) {
		int igu_vec = offset + i + BP_L_ID(bp);

		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     bp->num_queues + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
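
/* Vector layout sketch: msix_table[0] serves the slowpath status block,
 * while entry (1 + i) maps fastpath queue i to IGU vector
 * (1 + i + BP_L_ID(bp)), i.e. relative to the function's IGU base.
 */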

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0,
				 bp->dev->name, &bp->fp[i]);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc -%d\n",
				  i + offset, -rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
	}

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	int rc;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			if (bp->state == BNX2X_STATE_OPEN)
				netif_wake_queue(bp->dev);
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (netif_running(bp->dev)) {
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
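
	/* Byte-packing example (illustrative, little-endian host): for MAC
	 * 00:11:22:33:44:55 the three swab16() loads below produce
	 * msb=0x0011, middle=0x2233 and lsb=0x4455, the ordering the CAM
	 * expects.
	 */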
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 500;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state)
			return 0;

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
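
/* Usage note: callers post a ramrod via bnx2x_sp_post() and then spin
 * here until bnx2x_sp_event() - from interrupt context, or via the
 * explicit bnx2x_rx_int() calls in poll mode - updates *state_p.
 */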

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bp->fp[index].state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(bp->fp[index].state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	if (use_inta) {
		bp->num_queues = 1;

	} else {
		if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
			/* user requested number */
			bp->num_queues = use_multi;

		else if (use_multi)
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BP_MAX_QUEUES(bp));
		else
			bp->num_queues = 1;

		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fall back to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			bp->num_queues = 1;
			if (use_multi)
				BNX2X_ERR("Multi requested but failed"
					  " to enable MSI-X\n");
		}
	}

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
		printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name);
	} else {
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			goto load_error1;
		}
	}

	/* Send LOAD_REQUEST command to MCP;
	   the reply tells us which type of LOAD to perform:
	   if it is the first port to be initialized,
	   common blocks should be initialized, otherwise - not
	 */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}
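
	/* Bookkeeping sketch for the no-MCP case: load_count[0] counts
	 * loaded functions globally and load_count[1 + port] per port;
	 * the first global load performs COMMON init, the first load on
	 * a port performs PORT init, everything else FUNCTION init only.
	 */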

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!! mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should only be re-enabled */
		netif_wake_queue(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_start_queue(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);


	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	/* TBD we really need to reset the chip
	   if we want to recover from this */
	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	int rc;

	/* halt the connection */
	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(bp->fp[index].state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(bp->fp[index].state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	u16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block;
	   we are going to reset the chip anyway,
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#else
			rc = -EBUSY;
#endif
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

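/* The unload stages mirror the load stages in bnx2x_init_hw(): a COMMON
 * unload tears down function, port and common blocks, a PORT unload the
 * function and port blocks, and a FUNCTION unload only the function.
 */
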
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fast path tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		smp_rmb();
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp, 1000);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
			smp_rmb();
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = BP_CL_ID(bp);
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

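/* The reset task simply bounces the NIC: a full unload followed by a
 * normal load, all under rtnl_lock so it cannot race ndo_open/close.
 */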
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

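/* UNDI is the pre-boot (PXE) driver. If it left the device initialized
 * (detected by the doorbell CID offset it programs to 0x7), the device
 * must be quiesced and reset before this driver can load. The HW lock
 * taken here is released on both paths of the inner check.
 */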
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
				    HC_REG_CONFIG_0), 0x1000);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
				NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
				NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			       (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
				MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

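/* Read the chip id, flash size and shmem base, then validate the MCP
 * signature and cache bootcode version, WoL capability and part number.
 * If shmem looks bogus the MCP is assumed dead and NO_MCP_FLAG is set.
 */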
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);

	BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
		       bp->common.hw_config, bp->common.board);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

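/* Build the ethtool SUPPORTED_* mask for this port from the external
 * PHY type configured in NVRAM, then prune it according to the NVRAM
 * speed_cap_mask.
 */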
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

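/* Translate the NVRAM link_config word into req_line_speed/req_duplex
 * and the advertising mask, falling back to autoneg on a bad config.
 */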
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

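/* Gather per-port configuration (serdes/lane/PHY configs, speed
 * capabilities, link_config and the MAC address) from shared memory.
 */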
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.serdes_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
	     KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
		       "  link_config 0x%08x\n",
		       bp->link_params.serdes_config,
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

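/* Per-function HW info: on E1H a valid E1HOV tag means multi-function
 * mode, in which case the MAC address may be overridden from mf_cfg.
 */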
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("Single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

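/* One-time driver state init: work items, timer, TPA and coalescing
 * defaults; interrupts stay masked until the HW is initialized.
 */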
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;
	bp->rx_offset = 0;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : bp->timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}
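
/* NVRAM access is arbitrated between the two ports through the
 * MCPR_NVM_SW_ARB register: set the request bit, then poll until the
 * arbiter grants (or, on release, drops) this port's bit.
 */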
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
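
/* Single dword NVRAM read: program address and command, then poll for
 * the DONE bit. Data is converted to big-endian so that the buffer
 * handed back to ethtool reads as a plain byte array.
 */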
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			val = cpu_to_be32(val);
			*ret_val = val;
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	u32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
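
/* Multi-dword NVRAM write. The FIRST/LAST command flags must bracket
 * the transfer as a whole and every NVRAM_PAGE_SIZE page within it,
 * which is what the flag juggling at the top of the loop is about.
 */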
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
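
/* TPA (LRO) can only be enabled while Rx checksumming is on, since
 * aggregation relies on HW checksum validation; toggling it requires
 * a reload of the NIC to take effect.
 */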
8497
df0f2343
VZ
8498static int bnx2x_set_flags(struct net_device *dev, u32 data)
8499{
8500 struct bnx2x *bp = netdev_priv(dev);
8501 int changed = 0;
8502 int rc = 0;
8503
8504 /* TPA requires Rx CSUM offloading */
8505 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8506 if (!(dev->features & NETIF_F_LRO)) {
8507 dev->features |= NETIF_F_LRO;
8508 bp->flags |= TPA_ENABLE_FLAG;
8509 changed = 1;
8510 }
8511
8512 } else if (dev->features & NETIF_F_LRO) {
8513 dev->features &= ~NETIF_F_LRO;
8514 bp->flags &= ~TPA_ENABLE_FLAG;
8515 changed = 1;
8516 }
8517
8518 if (changed && netif_running(dev)) {
8519 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8520 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8521 }
8522
8523 return rc;
8524}
8525
a2fbb9ea
ET
8526static u32 bnx2x_get_rx_csum(struct net_device *dev)
8527{
8528 struct bnx2x *bp = netdev_priv(dev);
8529
8530 return bp->rx_csum;
8531}
8532
8533static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8534{
8535 struct bnx2x *bp = netdev_priv(dev);
df0f2343 8536 int rc = 0;
a2fbb9ea
ET
8537
8538 bp->rx_csum = data;
df0f2343
VZ
8539
8540 /* Disable TPA when Rx CSUM is disabled; otherwise all
8541 TPA'ed packets will be discarded due to a wrong TCP CSUM */
8542 if (!data) {
8543 u32 flags = ethtool_op_get_flags(dev);
8544
8545 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8546 }
8547
8548 return rc;
8549}
8550
8551static int bnx2x_set_tso(struct net_device *dev, u32 data)
8552{
755735eb 8553 if (data) {
a2fbb9ea 8554 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8555 dev->features |= NETIF_F_TSO6;
8556 } else {
a2fbb9ea 8557 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8558 dev->features &= ~NETIF_F_TSO6;
8559 }
8560
8561 return 0;
8562}
8563
f3c87cdd 8564static const struct {
8565 char string[ETH_GSTRING_LEN];
8566} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8567 { "register_test (offline)" },
8568 { "memory_test (offline)" },
8569 { "loopback_test (offline)" },
8570 { "nvram_test (online)" },
8571 { "interrupt_test (online)" },
8572 { "link_test (online)" },
8573 { "idle check (online)" },
8574 { "MC errors (online)" }
8575};
8576
8577static int bnx2x_self_test_count(struct net_device *dev)
8578{
8579 return BNX2X_NUM_TESTS;
8580}
8581
8582static int bnx2x_test_registers(struct bnx2x *bp)
8583{
8584 int idx, i, rc = -ENODEV;
8585 u32 wr_val = 0;
9dabc424 8586 int port = BP_PORT(bp);
8587 static const struct {
8588 u32 offset0;
8589 u32 offset1;
8590 u32 mask;
8591 } reg_tbl[] = {
8592/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8593 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8594 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8595 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8596 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8597 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8598 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8599 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8600 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8601 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8602/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8603 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8604 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8605 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8606 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8607 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8608 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8609 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8610 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8611 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8612/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8613 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8614 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8615 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8616 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8617 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8618 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8619 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8620 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8621 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8622/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8623 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8624 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8625 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8626 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8627 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8628 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8629 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8630
8631 { 0xffffffff, 0, 0x00000000 }
8632 };
8633
8634 if (!netif_running(bp->dev))
8635 return rc;
8636
8637 /* Repeat the test twice:
8638 First by writing 0x00000000, second by writing 0xffffffff */
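 /* Each register is probed at offset0 + port*offset1, so one table
 covers both ports; only the bits in 'mask' are writable. A bit that
 reads back differently from what was written (under the mask)
 indicates a stuck or inaccessible register. */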
8639 for (idx = 0; idx < 2; idx++) {
8640
8641 switch (idx) {
8642 case 0:
8643 wr_val = 0;
8644 break;
8645 case 1:
8646 wr_val = 0xffffffff;
8647 break;
8648 }
8649
8650 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8651 u32 offset, mask, save_val, val;
8652
8653 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8654 mask = reg_tbl[i].mask;
8655
8656 save_val = REG_RD(bp, offset);
8657
8658 REG_WR(bp, offset, wr_val);
8659 val = REG_RD(bp, offset);
8660
8661 /* Restore the original register's value */
8662 REG_WR(bp, offset, save_val);
8663
8664 /* verify that the value is as expected */
8665 if ((val & mask) != (wr_val & mask))
8666 goto test_reg_exit;
8667 }
8668 }
8669
8670 rc = 0;
8671
8672test_reg_exit:
8673 return rc;
8674}
8675
8676static int bnx2x_test_memory(struct bnx2x *bp)
8677{
8678 int i, j, rc = -ENODEV;
8679 u32 val;
8680 static const struct {
8681 u32 offset;
8682 int size;
8683 } mem_tbl[] = {
8684 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8685 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8686 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8687 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8688 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8689 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8690 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8691
8692 { 0xffffffff, 0 }
8693 };
8694 static const struct {
8695 char *name;
8696 u32 offset;
8697 u32 e1_mask;
8698 u32 e1h_mask;
f3c87cdd 8699 } prty_tbl[] = {
8700 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8701 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8702 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8703 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8704 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8705 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8706
8707 { NULL, 0xffffffff, 0, 0 }
8708 };
8709
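 /* The loops below first read every word of each memory to exercise
 the parity logic, then check the parity status registers; parity
 bits that are expected on a given chip (E1 vs E1H) are masked out
 via e1_mask/e1h_mask before a failure is declared. */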
8710 if (!netif_running(bp->dev))
8711 return rc;
8712
8713 /* Go through all the memories */
8714 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8715 for (j = 0; j < mem_tbl[i].size; j++)
8716 REG_RD(bp, mem_tbl[i].offset + j*4);
8717
8718 /* Check the parity status */
8719 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8720 val = REG_RD(bp, prty_tbl[i].offset);
8721 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8722 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8723 DP(NETIF_MSG_HW,
8724 "%s is 0x%x\n", prty_tbl[i].name, val);
8725 goto test_mem_exit;
8726 }
8727 }
8728
8729 rc = 0;
8730
8731test_mem_exit:
8732 return rc;
8733}
8734
8735static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8736{
8737 int cnt = 1000;
8738
8739 if (link_up)
8740 while (bnx2x_link_test(bp) && cnt--)
8741 msleep(10);
8742}
8743
8744static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8745{
8746 unsigned int pkt_size, num_pkts, i;
8747 struct sk_buff *skb;
8748 unsigned char *packet;
8749 struct bnx2x_fastpath *fp = &bp->fp[0];
8750 u16 tx_start_idx, tx_idx;
8751 u16 rx_start_idx, rx_idx;
8752 u16 pkt_prod;
8753 struct sw_tx_bd *tx_buf;
8754 struct eth_tx_bd *tx_bd;
8755 dma_addr_t mapping;
8756 union eth_rx_cqe *cqe;
8757 u8 cqe_fp_flags;
8758 struct sw_rx_bd *rx_buf;
8759 u16 len;
8760 int rc = -ENODEV;
8761
8762 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8763 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 8764 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8765
8766 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
3910c8ae 8767 u16 cnt = 1000;
f3c87cdd 8768 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
f3c87cdd 8769 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
f3c87cdd 8770 /* wait until link state is restored */
8771 if (link_up)
8772 while (cnt-- && bnx2x_test_link(&bp->link_params,
8773 &bp->link_vars))
8774 msleep(10);
8775 } else
8776 return -EINVAL;
8777
8778 pkt_size = 1514;
8779 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8780 if (!skb) {
8781 rc = -ENOMEM;
8782 goto test_loopback_exit;
8783 }
8784 packet = skb_put(skb, pkt_size);
8785 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8786 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8787 for (i = ETH_HLEN; i < pkt_size; i++)
8788 packet[i] = (unsigned char) (i & 0xff);
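 /* The test frame is addressed to our own MAC and filled with an
 incrementing byte pattern, so the receive side below can verify
 that the payload survived the loopback intact. */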
8789
8790 num_pkts = 0;
8791 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8792 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8793
8794 pkt_prod = fp->tx_pkt_prod++;
8795 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8796 tx_buf->first_bd = fp->tx_bd_prod;
8797 tx_buf->skb = skb;
8798
8799 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8800 mapping = pci_map_single(bp->pdev, skb->data,
8801 skb_headlen(skb), PCI_DMA_TODEVICE);
8802 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8803 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8804 tx_bd->nbd = cpu_to_le16(1);
8805 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8806 tx_bd->vlan = cpu_to_le16(pkt_prod);
8807 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8808 ETH_TX_BD_FLAGS_END_BD);
8809 tx_bd->general_data = ((UNICAST_ADDRESS <<
8810 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8811
8812 wmb();
8813
8814 fp->hw_tx_prods->bds_prod =
8815 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8816 mb(); /* FW restriction: must not reorder writing nbd and packets */
8817 fp->hw_tx_prods->packets_prod =
8818 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8819 DOORBELL(bp, FP_IDX(fp), 0);
8820
8821 mmiowb();
8822
8823 num_pkts++;
8824 fp->tx_bd_prod++;
8825 bp->dev->trans_start = jiffies;
8826
8827 udelay(100);
8828
8829 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8830 if (tx_idx != tx_start_idx + num_pkts)
8831 goto test_loopback_exit;
8832
8833 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8834 if (rx_idx != rx_start_idx + num_pkts)
8835 goto test_loopback_exit;
8836
8837 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8838 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8839 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8840 goto test_loopback_rx_exit;
8841
8842 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8843 if (len != pkt_size)
8844 goto test_loopback_rx_exit;
8845
8846 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8847 skb = rx_buf->skb;
8848 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8849 for (i = ETH_HLEN; i < pkt_size; i++)
8850 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8851 goto test_loopback_rx_exit;
8852
8853 rc = 0;
8854
8855test_loopback_rx_exit:
8856
8857 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8858 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8859 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8860 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8861
8862 /* Update producers */
8863 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8864 fp->rx_sge_prod);
8865
8866test_loopback_exit:
8867 bp->link_params.loopback_mode = LOOPBACK_NONE;
8868
8869 return rc;
8870}
8871
8872static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8873{
8874 int rc = 0;
8875
8876 if (!netif_running(bp->dev))
8877 return BNX2X_LOOPBACK_FAILED;
8878
f8ef6e44 8879 bnx2x_netif_stop(bp, 1);
3910c8ae 8880 bnx2x_acquire_phy_lock(bp);
8881
8882 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8883 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8884 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8885 }
8886
8887 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8888 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8889 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8890 }
8891
3910c8ae 8892 bnx2x_release_phy_lock(bp);
8893 bnx2x_netif_start(bp);
8894
8895 return rc;
8896}
8897
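/* CRC32 run over a block followed by its stored CRC leaves this well-known
 * residual constant, so each NVRAM region can be validated below without
 * knowing the individual checksum values. */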
8898#define CRC32_RESIDUAL 0xdebb20e3
8899
8900static int bnx2x_test_nvram(struct bnx2x *bp)
8901{
8902 static const struct {
8903 int offset;
8904 int size;
8905 } nvram_tbl[] = {
8906 { 0, 0x14 }, /* bootstrap */
8907 { 0x14, 0xec }, /* dir */
8908 { 0x100, 0x350 }, /* manuf_info */
8909 { 0x450, 0xf0 }, /* feature_info */
8910 { 0x640, 0x64 }, /* upgrade_key_info */
8911 { 0x6a4, 0x64 },
8912 { 0x708, 0x70 }, /* manuf_key_info */
8913 { 0x778, 0x70 },
8914 { 0, 0 }
8915 };
8916 u32 buf[0x350 / 4];
8917 u8 *data = (u8 *)buf;
8918 int i, rc;
8919 u32 magic, csum;
8920
8921 rc = bnx2x_nvram_read(bp, 0, data, 4);
8922 if (rc) {
8923 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8924 goto test_nvram_exit;
8925 }
8926
8927 magic = be32_to_cpu(buf[0]);
8928 if (magic != 0x669955aa) {
8929 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8930 rc = -ENODEV;
8931 goto test_nvram_exit;
8932 }
8933
8934 for (i = 0; nvram_tbl[i].size; i++) {
8935
8936 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8937 nvram_tbl[i].size);
8938 if (rc) {
8939 DP(NETIF_MSG_PROBE,
8940 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8941 goto test_nvram_exit;
8942 }
8943
8944 csum = ether_crc_le(nvram_tbl[i].size, data);
8945 if (csum != CRC32_RESIDUAL) {
8946 DP(NETIF_MSG_PROBE,
8947 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8948 rc = -ENODEV;
8949 goto test_nvram_exit;
8950 }
8951 }
8952
8953test_nvram_exit:
8954 return rc;
8955}
8956
8957static int bnx2x_test_intr(struct bnx2x *bp)
8958{
8959 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8960 int i, rc;
8961
8962 if (!netif_running(bp->dev))
8963 return -ENODEV;
8964
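 /* The interrupt test posts a harmless SET_MAC ramrod and waits up to
 ~100ms for its completion; if set_mac_pending never clears, slowpath
 interrupts are not being delivered. */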
8d9c5f34 8965 config->hdr.length = 0;
8966 if (CHIP_IS_E1(bp))
8967 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
8968 else
8969 config->hdr.offset = BP_FUNC(bp);
8970 config->hdr.client_id = BP_CL_ID(bp);
8971 config->hdr.reserved1 = 0;
8972
8973 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8974 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8975 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8976 if (rc == 0) {
8977 bp->set_mac_pending++;
8978 for (i = 0; i < 10; i++) {
8979 if (!bp->set_mac_pending)
8980 break;
8981 msleep_interruptible(10);
8982 }
8983 if (i == 10)
8984 rc = -ENODEV;
8985 }
8986
8987 return rc;
8988}
8989
8990static void bnx2x_self_test(struct net_device *dev,
8991 struct ethtool_test *etest, u64 *buf)
8992{
8993 struct bnx2x *bp = netdev_priv(dev);
8994
8995 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8996
f3c87cdd 8997 if (!netif_running(dev))
a2fbb9ea 8998 return;
a2fbb9ea 8999
33471629 9000 /* offline tests are not supported in MF mode */
9001 if (IS_E1HMF(bp))
9002 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9003
9004 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9005 u8 link_up;
9006
9007 link_up = bp->link_vars.link_up;
9008 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9009 bnx2x_nic_load(bp, LOAD_DIAG);
9010 /* wait until link state is restored */
9011 bnx2x_wait_for_link(bp, link_up);
9012
9013 if (bnx2x_test_registers(bp) != 0) {
9014 buf[0] = 1;
9015 etest->flags |= ETH_TEST_FL_FAILED;
9016 }
9017 if (bnx2x_test_memory(bp) != 0) {
9018 buf[1] = 1;
9019 etest->flags |= ETH_TEST_FL_FAILED;
9020 }
9021 buf[2] = bnx2x_test_loopback(bp, link_up);
9022 if (buf[2] != 0)
9023 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 9024
9025 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9026 bnx2x_nic_load(bp, LOAD_NORMAL);
9027 /* wait until link state is restored */
9028 bnx2x_wait_for_link(bp, link_up);
9029 }
9030 if (bnx2x_test_nvram(bp) != 0) {
9031 buf[3] = 1;
9032 etest->flags |= ETH_TEST_FL_FAILED;
9033 }
9034 if (bnx2x_test_intr(bp) != 0) {
9035 buf[4] = 1;
9036 etest->flags |= ETH_TEST_FL_FAILED;
9037 }
9038 if (bp->port.pmf)
9039 if (bnx2x_link_test(bp) != 0) {
9040 buf[5] = 1;
9041 etest->flags |= ETH_TEST_FL_FAILED;
9042 }
9043 buf[7] = bnx2x_mc_assert(bp);
9044 if (buf[7] != 0)
9045 etest->flags |= ETH_TEST_FL_FAILED;
9046
9047#ifdef BNX2X_EXTRA_DEBUG
9048 bnx2x_panic_dump(bp);
9049#endif
9050}
9051
9052static const struct {
9053 long offset;
9054 int size;
9055 u32 flags;
9056#define STATS_FLAGS_PORT 1
9057#define STATS_FLAGS_FUNC 2
9058 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 9059} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9060/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9061 8, STATS_FLAGS_FUNC, "rx_bytes" },
9062 { STATS_OFFSET32(error_bytes_received_hi),
9063 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9064 { STATS_OFFSET32(total_bytes_transmitted_hi),
9065 8, STATS_FLAGS_FUNC, "tx_bytes" },
9066 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9067 8, STATS_FLAGS_PORT, "tx_error_bytes" },
bb2a0f7a 9068 { STATS_OFFSET32(total_unicast_packets_received_hi),
66e855f3 9069 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
bb2a0f7a 9070 { STATS_OFFSET32(total_multicast_packets_received_hi),
66e855f3 9071 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
bb2a0f7a 9072 { STATS_OFFSET32(total_broadcast_packets_received_hi),
66e855f3 9073 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
bb2a0f7a 9074 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
66e855f3 9075 8, STATS_FLAGS_FUNC, "tx_packets" },
bb2a0f7a 9076 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
66e855f3 9077 8, STATS_FLAGS_PORT, "tx_mac_errors" },
bb2a0f7a 9078/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
66e855f3 9079 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9080 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9081 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9082 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9083 8, STATS_FLAGS_PORT, "rx_align_errors" },
bb2a0f7a 9084 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9085 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9086 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9087 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
bb2a0f7a 9088 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9089 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9090 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9091 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9092 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9093 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9094 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9095 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9096 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9097 8, STATS_FLAGS_PORT, "rx_fragments" },
9098/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9099 8, STATS_FLAGS_PORT, "rx_jabbers" },
bb2a0f7a 9100 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
66e855f3 9101 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
bb2a0f7a 9102 { STATS_OFFSET32(jabber_packets_received),
66e855f3 9103 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
bb2a0f7a 9104 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9105 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9106 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9107 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9108 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9109 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9110 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9111 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9112 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9113 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9114 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9115 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
bb2a0f7a 9116 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9117 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
bb2a0f7a 9118/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
66e855f3 9119 8, STATS_FLAGS_PORT, "rx_xon_frames" },
bb2a0f7a 9120 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9121 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9122 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9123 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9124 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9125 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
bb2a0f7a 9126 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9127 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9128 { STATS_OFFSET32(mac_filter_discard),
9129 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9130 { STATS_OFFSET32(no_buff_discard),
9131 4, STATS_FLAGS_FUNC, "rx_discards" },
9132 { STATS_OFFSET32(xxoverflow_discard),
9133 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9134 { STATS_OFFSET32(brb_drop_hi),
9135 8, STATS_FLAGS_PORT, "brb_discard" },
9136 { STATS_OFFSET32(brb_truncate_hi),
9137 8, STATS_FLAGS_PORT, "brb_truncate" },
9138/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9139 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9140 { STATS_OFFSET32(rx_skb_alloc_failed),
9141 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9142/* 42 */{ STATS_OFFSET32(hw_csum_err),
9143 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9144};
9145
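/* Port statistics come from the (shared) port MACs and cannot be attributed
 * to a single function, so they are hidden when running in E1H
 * multi-function mode; only per-function statistics are reported there. */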
9146#define IS_NOT_E1HMF_STAT(bp, i) \
9147 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9148
9149static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9150{
9151 struct bnx2x *bp = netdev_priv(dev);
9152 int i, j;
9153
9154 switch (stringset) {
9155 case ETH_SS_STATS:
bb2a0f7a 9156 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9157 if (IS_NOT_E1HMF_STAT(bp, i))
9158 continue;
9159 strcpy(buf + j*ETH_GSTRING_LEN,
9160 bnx2x_stats_arr[i].string);
9161 j++;
9162 }
9163 break;
9164
9165 case ETH_SS_TEST:
9166 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9167 break;
9168 }
9169}
9170
9171static int bnx2x_get_stats_count(struct net_device *dev)
9172{
9172 {
9173 struct bnx2x *bp = netdev_priv(dev);
9174 int i, num_stats = 0;
9175
9176 for (i = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9177 if (IS_NOT_E1HMF_STAT(bp, i))
9178 continue;
9179 num_stats++;
9180 }
9181 return num_stats;
9182}
9183
9184static void bnx2x_get_ethtool_stats(struct net_device *dev,
9185 struct ethtool_stats *stats, u64 *buf)
9186{
9187 struct bnx2x *bp = netdev_priv(dev);
9188 u32 *hw_stats = (u32 *)&bp->eth_stats;
9189 int i, j;
a2fbb9ea 9190
bb2a0f7a 9191 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9192 if (IS_NOT_E1HMF_STAT(bp, i))
a2fbb9ea 9193 continue;
9194
9195 if (bnx2x_stats_arr[i].size == 0) {
9196 /* skip this counter */
9197 buf[j] = 0;
9198 j++;
9199 continue;
9200 }
bb2a0f7a 9201 if (bnx2x_stats_arr[i].size == 4) {
a2fbb9ea 9202 /* 4-byte counter */
9203 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9204 j++;
9205 continue;
9206 }
9207 /* 8-byte counter */
9208 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9209 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9210 j++;
9211 }
9212}
9213
9214static int bnx2x_phys_id(struct net_device *dev, u32 data)
9215{
9216 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9217 int port = BP_PORT(bp);
9218 int i;
9219
9220 if (!netif_running(dev))
9221 return 0;
9222
9223 if (!bp->port.pmf)
9224 return 0;
9225
9226 if (data == 0)
9227 data = 2;
9228
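 /* 'data' is the blink duration in seconds (0 defaults to 2 above);
 each 500ms half-period below toggles the LED between forced-on
 (operational mode at 1G) and off. */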
9229 for (i = 0; i < (data * 2); i++) {
c18487ee 9230 if ((i % 2) == 0)
34f80b04 9231 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9232 bp->link_params.hw_led_mode,
9233 bp->link_params.chip_id);
9234 else
34f80b04 9235 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9236 bp->link_params.hw_led_mode,
9237 bp->link_params.chip_id);
9238
9239 msleep_interruptible(500);
9240 if (signal_pending(current))
9241 break;
9242 }
9243
c18487ee 9244 if (bp->link_vars.link_up)
34f80b04 9245 bnx2x_set_led(bp, port, LED_MODE_OPER,
9246 bp->link_vars.line_speed,
9247 bp->link_params.hw_led_mode,
9248 bp->link_params.chip_id);
9249
9250 return 0;
9251}
9252
9253static struct ethtool_ops bnx2x_ethtool_ops = {
9254 .get_settings = bnx2x_get_settings,
9255 .set_settings = bnx2x_set_settings,
9256 .get_drvinfo = bnx2x_get_drvinfo,
9257 .get_wol = bnx2x_get_wol,
9258 .set_wol = bnx2x_set_wol,
9259 .get_msglevel = bnx2x_get_msglevel,
9260 .set_msglevel = bnx2x_set_msglevel,
9261 .nway_reset = bnx2x_nway_reset,
9262 .get_link = ethtool_op_get_link,
9263 .get_eeprom_len = bnx2x_get_eeprom_len,
9264 .get_eeprom = bnx2x_get_eeprom,
9265 .set_eeprom = bnx2x_set_eeprom,
9266 .get_coalesce = bnx2x_get_coalesce,
9267 .set_coalesce = bnx2x_set_coalesce,
9268 .get_ringparam = bnx2x_get_ringparam,
9269 .set_ringparam = bnx2x_set_ringparam,
9270 .get_pauseparam = bnx2x_get_pauseparam,
9271 .set_pauseparam = bnx2x_set_pauseparam,
9272 .get_rx_csum = bnx2x_get_rx_csum,
9273 .set_rx_csum = bnx2x_set_rx_csum,
9274 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9275 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9276 .set_flags = bnx2x_set_flags,
9277 .get_flags = ethtool_op_get_flags,
9278 .get_sg = ethtool_op_get_sg,
9279 .set_sg = ethtool_op_set_sg,
9280 .get_tso = ethtool_op_get_tso,
9281 .set_tso = bnx2x_set_tso,
9282 .self_test_count = bnx2x_self_test_count,
9283 .self_test = bnx2x_self_test,
9284 .get_strings = bnx2x_get_strings,
9285 .phys_id = bnx2x_phys_id,
9286 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9287 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9288};
9289
9290/* end of ethtool_ops */
9291
9292/****************************************************************************
9293* General service functions
9294****************************************************************************/
9295
9296static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9297{
9298 u16 pmcsr;
9299
9300 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9301
9302 switch (state) {
9303 case PCI_D0:
34f80b04 9304 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9305 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9306 PCI_PM_CTRL_PME_STATUS));
9307
9308 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 9309 /* delay required during transition out of D3hot */
a2fbb9ea 9310 msleep(20);
34f80b04 9311 break;
a2fbb9ea 9312
9313 case PCI_D3hot:
9314 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9315 pmcsr |= 3;
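 /* 3 is the D3hot encoding of the PM_CTRL power state field */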
a2fbb9ea 9316
9317 if (bp->wol)
9318 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9319
9320 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9321 pmcsr);
a2fbb9ea 9322
9323 /* No more memory access after this point until
9324 * device is brought back to D0.
9325 */
9326 break;
9327
9328 default:
9329 return -EINVAL;
9330 }
9331 return 0;
9332}
9333
9334static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9335{
9336 u16 rx_cons_sb;
9337
9338 /* Tell compiler that status block fields can change */
9339 barrier();
9340 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
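 /* The last entry of each RCQ page is a "next page" element rather
 than a real completion, so step past it when the consumer index
 lands there (an assumption based on the MAX_RCQ_DESC_CNT layout). */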
9341 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9342 rx_cons_sb++;
9343 return (fp->rx_comp_cons != rx_cons_sb);
9344}
9345
9346/*
9347 * net_device service functions
9348 */
9349
9350static int bnx2x_poll(struct napi_struct *napi, int budget)
9351{
9352 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9353 napi);
9354 struct bnx2x *bp = fp->bp;
9355 int work_done = 0;
9356
9357#ifdef BNX2X_STOP_ON_ERROR
9358 if (unlikely(bp->panic))
34f80b04 9359 goto poll_panic;
9360#endif
9361
9362 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9363 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9364 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9365
9366 bnx2x_update_fpsb_idx(fp);
9367
237907c1 9368 if (bnx2x_has_tx_work(fp))
9369 bnx2x_tx_int(fp, budget);
9370
237907c1 9371 if (bnx2x_has_rx_work(fp))
a2fbb9ea 9372 work_done = bnx2x_rx_int(fp, budget);
da5a662a 9373 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9374
9375 /* must not complete if we consumed full budget */
da5a662a 9376 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9377
9378#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9379poll_panic:
a2fbb9ea 9380#endif
288379f0 9381 napi_complete(napi);
a2fbb9ea 9382
34f80b04 9383 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9384 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9385 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9386 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9387 }
9388 return work_done;
9389}
9390
9391
9392/* we split the first BD into headers and data BDs
33471629 9393 * to ease the pain of our fellow microcode engineers
9394 * we use one mapping for both BDs
9395 * So far this has only been observed to happen
9396 * in Other Operating Systems(TM)
9397 */
9398static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9399 struct bnx2x_fastpath *fp,
9400 struct eth_tx_bd **tx_bd, u16 hlen,
9401 u16 bd_prod, int nbd)
9402{
9403 struct eth_tx_bd *h_tx_bd = *tx_bd;
9404 struct eth_tx_bd *d_tx_bd;
9405 dma_addr_t mapping;
9406 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9407
9408 /* first fix first BD */
9409 h_tx_bd->nbd = cpu_to_le16(nbd);
9410 h_tx_bd->nbytes = cpu_to_le16(hlen);
9411
9412 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9413 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9414 h_tx_bd->addr_lo, h_tx_bd->nbd);
9415
9416 /* now get a new data BD
9417 * (after the pbd) and fill it */
9418 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9419 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9420
9421 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9422 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9423
9424 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9425 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9426 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9427 d_tx_bd->vlan = 0;
9428 /* this marks the BD as one that has no individual mapping;
9429 * the FW ignores this flag in a BD not marked start
9430 */
9431 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9432 DP(NETIF_MSG_TX_QUEUED,
9433 "TSO split data size is %d (%x:%x)\n",
9434 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9435
9436 /* update tx_bd for marking the last BD flag */
9437 *tx_bd = d_tx_bd;
9438
9439 return bd_prod;
9440}
9441
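/* The hardware starts checksumming at an offset that may differ from the
 * transport header by 'fix' bytes; fold that span back out of (or into)
 * the checksum, then byte-swap it into the format the parsing BD expects.
 */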
9442static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9443{
9444 if (fix > 0)
9445 csum = (u16) ~csum_fold(csum_sub(csum,
9446 csum_partial(t_header - fix, fix, 0)));
9447
9448 else if (fix < 0)
9449 csum = (u16) ~csum_fold(csum_add(csum,
9450 csum_partial(t_header, -fix, 0)));
9451
9452 return swab16(csum);
9453}
9454
9455static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9456{
9457 u32 rc;
9458
9459 if (skb->ip_summed != CHECKSUM_PARTIAL)
9460 rc = XMIT_PLAIN;
9461
9462 else {
9463 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9464 rc = XMIT_CSUM_V6;
9465 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9466 rc |= XMIT_CSUM_TCP;
9467
9468 } else {
9469 rc = XMIT_CSUM_V4;
9470 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9471 rc |= XMIT_CSUM_TCP;
9472 }
9473 }
9474
9475 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9476 rc |= XMIT_GSO_V4;
9477
9478 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9479 rc |= XMIT_GSO_V6;
9480
9481 return rc;
9482}
9483
632da4d6 9484#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9485/* check if packet requires linearization (packet is too fragmented) */
9486static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9487 u32 xmit_type)
9488{
9489 int to_copy = 0;
9490 int hlen = 0;
9491 int first_bd_sz = 0;
9492
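 /* The FW can fetch at most MAX_FETCH_BD BDs per packet, so for LSO
 a window of (MAX_FETCH_BD - 3) fragments is slid across the frag
 list: if any window sums to less than one MSS, a single segment
 could span too many BDs and the skb must be linearized first. */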
9493 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9494 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9495
9496 if (xmit_type & XMIT_GSO) {
9497 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9498 /* Check if LSO packet needs to be copied:
9499 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9500 int wnd_size = MAX_FETCH_BD - 3;
33471629 9501 /* Number of windows to check */
9502 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9503 int wnd_idx = 0;
9504 int frag_idx = 0;
9505 u32 wnd_sum = 0;
9506
9507 /* Headers length */
9508 hlen = (int)(skb_transport_header(skb) - skb->data) +
9509 tcp_hdrlen(skb);
9510
9511 /* Amount of data (w/o headers) on linear part of SKB*/
9512 first_bd_sz = skb_headlen(skb) - hlen;
9513
9514 wnd_sum = first_bd_sz;
9515
9516 /* Calculate the first sum - it's special */
9517 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9518 wnd_sum +=
9519 skb_shinfo(skb)->frags[frag_idx].size;
9520
9521 /* If there was data on linear skb data - check it */
9522 if (first_bd_sz > 0) {
9523 if (unlikely(wnd_sum < lso_mss)) {
9524 to_copy = 1;
9525 goto exit_lbl;
9526 }
9527
9528 wnd_sum -= first_bd_sz;
9529 }
9530
9531 /* Others are easier: run through the frag list and
9532 check all windows */
9533 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9534 wnd_sum +=
9535 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9536
9537 if (unlikely(wnd_sum < lso_mss)) {
9538 to_copy = 1;
9539 break;
9540 }
9541 wnd_sum -=
9542 skb_shinfo(skb)->frags[wnd_idx].size;
9543 }
9544
9545 } else {
9546 /* a non-LSO packet this fragmented should always
9547 be linearized */
9548 to_copy = 1;
9549 }
9550 }
9551
9552exit_lbl:
9553 if (unlikely(to_copy))
9554 DP(NETIF_MSG_TX_QUEUED,
9555 "Linearization IS REQUIRED for %s packet. "
9556 "num_frags %d hlen %d first_bd_sz %d\n",
9557 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9558 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9559
9560 return to_copy;
9561}
632da4d6 9562#endif
9563
9564/* called with netif_tx_lock
a2fbb9ea 9565 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 9566 * netif_wake_queue()
9567 */
9568static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9569{
9570 struct bnx2x *bp = netdev_priv(dev);
9571 struct bnx2x_fastpath *fp;
9572 struct sw_tx_bd *tx_buf;
9573 struct eth_tx_bd *tx_bd;
9574 struct eth_tx_parse_bd *pbd = NULL;
9575 u16 pkt_prod, bd_prod;
755735eb 9576 int nbd, fp_index;
a2fbb9ea 9577 dma_addr_t mapping;
9578 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9579 int vlan_off = (bp->e1hov ? 4 : 0);
9580 int i;
9581 u8 hlen = 0;
9582
9583#ifdef BNX2X_STOP_ON_ERROR
9584 if (unlikely(bp->panic))
9585 return NETDEV_TX_BUSY;
9586#endif
9587
755735eb 9588 fp_index = (smp_processor_id() % bp->num_queues);
a2fbb9ea 9589 fp = &bp->fp[fp_index];
755735eb 9590
231fd58a 9591 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
bb2a0f7a 9592 bp->eth_stats.driver_xoff++;
9593 netif_stop_queue(dev);
9594 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9595 return NETDEV_TX_BUSY;
9596 }
9597
9598 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9599 " gso type %x xmit_type %x\n",
9600 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9601 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9602
632da4d6 9603#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
33471629 9604 /* First, check if we need to linearize the skb
9605 (due to FW restrictions) */
9606 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9607 /* Statistics of linearization */
9608 bp->lin_cnt++;
9609 if (skb_linearize(skb) != 0) {
9610 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9611 "silently dropping this SKB\n");
9612 dev_kfree_skb_any(skb);
da5a662a 9613 return NETDEV_TX_OK;
9614 }
9615 }
632da4d6 9616#endif
755735eb 9617
a2fbb9ea 9618 /*
755735eb 9619 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 9620 then for TSO or xsum we have a parsing info BD,
755735eb 9621 and only then we have the rest of the TSO BDs.
9622 (don't forget to mark the last one as last,
9623 and to unmap only AFTER you write to the BD ...)
755735eb 9624 And above all, all pbd sizes are in words - NOT DWORDS!
9625 */
9626
9627 pkt_prod = fp->tx_pkt_prod++;
755735eb 9628 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 9629
755735eb 9630 /* get a tx_buf and first BD */
9631 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9632 tx_bd = &fp->tx_desc_ring[bd_prod];
9633
9634 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9635 tx_bd->general_data = (UNICAST_ADDRESS <<
9636 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9637 /* header nbd */
9638 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 9639
9640 /* remember the first BD of the packet */
9641 tx_buf->first_bd = fp->tx_bd_prod;
9642 tx_buf->skb = skb;
9643
9644 DP(NETIF_MSG_TX_QUEUED,
9645 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9646 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9647
9648#ifdef BCM_VLAN
9649 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9650 (bp->flags & HW_VLAN_TX_FLAG)) {
9651 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9652 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9653 vlan_off += 4;
9654 } else
0c6671b0 9655#endif
755735eb 9656 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 9657
755735eb 9658 if (xmit_type) {
755735eb 9659 /* turn on parsing and get a BD */
9660 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9661 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9662
9663 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9664 }
9665
9666 if (xmit_type & XMIT_CSUM) {
9667 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
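 /* lengths in the parsing BD are in 16-bit words, hence the
 divisions by 2 here and below */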
9668
9669 /* for now NS flag is not used in Linux */
755735eb 9670 pbd->global_data = (hlen |
96fc1784 9671 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 9672 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 9673
9674 pbd->ip_hlen = (skb_transport_header(skb) -
9675 skb_network_header(skb)) / 2;
9676
9677 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 9678
9679 pbd->total_hlen = cpu_to_le16(hlen);
9680 hlen = hlen*2 - vlan_off;
a2fbb9ea 9681
9682 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9683
9684 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 9685 tx_bd->bd_flags.as_bitfield |=
9686 ETH_TX_BD_FLAGS_IP_CSUM;
9687 else
9688 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9689
9690 if (xmit_type & XMIT_CSUM_TCP) {
9691 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9692
9693 } else {
9694 s8 fix = SKB_CS_OFF(skb); /* signed! */
9695
a2fbb9ea 9696 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 9697 pbd->cs_offset = fix / 2;
a2fbb9ea 9698
9699 DP(NETIF_MSG_TX_QUEUED,
9700 "hlen %d offset %d fix %d csum before fix %x\n",
9701 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9702 SKB_CS(skb));
9703
9704 /* HW bug: fixup the CSUM */
9705 pbd->tcp_pseudo_csum =
9706 bnx2x_csum_fix(skb_transport_header(skb),
9707 SKB_CS(skb), fix);
9708
9709 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9710 pbd->tcp_pseudo_csum);
9711 }
9712 }
9713
9714 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 9715 skb_headlen(skb), PCI_DMA_TODEVICE);
9716
9717 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9718 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 9719 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9720 tx_bd->nbd = cpu_to_le16(nbd);
9721 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9722
9723 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9724 " nbytes %d flags %x vlan %x\n",
9725 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9726 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9727 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 9728
755735eb 9729 if (xmit_type & XMIT_GSO) {
9730
9731 DP(NETIF_MSG_TX_QUEUED,
9732 "TSO packet len %d hlen %d total len %d tso size %d\n",
9733 skb->len, hlen, skb_headlen(skb),
9734 skb_shinfo(skb)->gso_size);
9735
9736 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9737
9738 if (unlikely(skb_headlen(skb) > hlen))
9739 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9740 bd_prod, ++nbd);
9741
9742 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9743 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9744 pbd->tcp_flags = pbd_tcp_flags(skb);
9745
9746 if (xmit_type & XMIT_GSO_V4) {
9747 pbd->ip_id = swab16(ip_hdr(skb)->id);
9748 pbd->tcp_pseudo_csum =
9749 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9750 ip_hdr(skb)->daddr,
9751 0, IPPROTO_TCP, 0));
9752
9753 } else
9754 pbd->tcp_pseudo_csum =
9755 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9756 &ipv6_hdr(skb)->daddr,
9757 0, IPPROTO_TCP, 0));
9758
9759 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9760 }
9761
9762 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9763 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 9764
9765 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9766 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 9767
9768 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9769 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 9770
9771 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9772 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9773 tx_bd->nbytes = cpu_to_le16(frag->size);
9774 tx_bd->vlan = cpu_to_le16(pkt_prod);
9775 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 9776
9777 DP(NETIF_MSG_TX_QUEUED,
9778 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9779 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9780 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9781 }
9782
755735eb 9783 /* now at last mark the BD as the last BD */
9784 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9785
9786 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9787 tx_bd, tx_bd->bd_flags.as_bitfield);
9788
9789 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9790
755735eb 9791 /* now send a tx doorbell, counting the next BD
9792 * if the packet contains or ends with it
9793 */
9794 if (TX_BD_POFF(bd_prod) < nbd)
9795 nbd++;
9796
9797 if (pbd)
9798 DP(NETIF_MSG_TX_QUEUED,
9799 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9800 " tcp_flags %x xsum %x seq %u hlen %u\n",
9801 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9802 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 9803 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 9804
755735eb 9805 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 9806
9807 /*
9808 * Make sure that the BD data is updated before updating the producer
9809 * since FW might read the BD right after the producer is updated.
9810 * This is only applicable for weak-ordered memory model archs such
9811 * as IA-64. The following barrier is also mandatory since the FW
9812 * assumes packets always have BDs.
9813 */
9814 wmb();
9815
9816 fp->hw_tx_prods->bds_prod =
9817 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 9818 mb(); /* FW restriction: must not reorder writing nbd and packets */
9819 fp->hw_tx_prods->packets_prod =
9820 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 9821 DOORBELL(bp, FP_IDX(fp), 0);
9822
9823 mmiowb();
9824
755735eb 9825 fp->tx_bd_prod += nbd;
9826 dev->trans_start = jiffies;
9827
9828 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9829 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9830 if we put Tx into XOFF state. */
9831 smp_mb();
a2fbb9ea 9832 netif_stop_queue(dev);
bb2a0f7a 9833 bp->eth_stats.driver_xoff++;
9834 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9835 netif_wake_queue(dev);
9836 }
9837 fp->tx_pkt++;
9838
9839 return NETDEV_TX_OK;
9840}
9841
bb2a0f7a 9842/* called with rtnl_lock */
9843static int bnx2x_open(struct net_device *dev)
9844{
9845 struct bnx2x *bp = netdev_priv(dev);
9846
9847 netif_carrier_off(dev);
9848
9849 bnx2x_set_power_state(bp, PCI_D0);
9850
bb2a0f7a 9851 return bnx2x_nic_load(bp, LOAD_OPEN);
9852}
9853
bb2a0f7a 9854/* called with rtnl_lock */
9855static int bnx2x_close(struct net_device *dev)
9856{
9857 struct bnx2x *bp = netdev_priv(dev);
9858
9859 /* Unload the driver, release IRQs */
9860 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9861 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9862 if (!CHIP_REV_IS_SLOW(bp))
9863 bnx2x_set_power_state(bp, PCI_D3hot);
9864
9865 return 0;
9866}
9867
9868/* called with netif_tx_lock from set_multicast */
9869static void bnx2x_set_rx_mode(struct net_device *dev)
9870{
9871 struct bnx2x *bp = netdev_priv(dev);
9872 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9873 int port = BP_PORT(bp);
9874
9875 if (bp->state != BNX2X_STATE_OPEN) {
9876 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9877 return;
9878 }
9879
9880 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9881
9882 if (dev->flags & IFF_PROMISC)
9883 rx_mode = BNX2X_RX_MODE_PROMISC;
9884
9885 else if ((dev->flags & IFF_ALLMULTI) ||
9886 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9887 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9888
9889 else { /* some multicasts */
9890 if (CHIP_IS_E1(bp)) {
9891 int i, old, offset;
9892 struct dev_mc_list *mclist;
9893 struct mac_configuration_cmd *config =
9894 bnx2x_sp(bp, mcast_config);
9895
9896 for (i = 0, mclist = dev->mc_list;
9897 mclist && (i < dev->mc_count);
9898 i++, mclist = mclist->next) {
9899
9900 config->config_table[i].
9901 cam_entry.msb_mac_addr =
9902 swab16(*(u16 *)&mclist->dmi_addr[0]);
9903 config->config_table[i].
9904 cam_entry.middle_mac_addr =
9905 swab16(*(u16 *)&mclist->dmi_addr[2]);
9906 config->config_table[i].
9907 cam_entry.lsb_mac_addr =
9908 swab16(*(u16 *)&mclist->dmi_addr[4]);
9909 config->config_table[i].cam_entry.flags =
9910 cpu_to_le16(port);
9911 config->config_table[i].
9912 target_table_entry.flags = 0;
9913 config->config_table[i].
9914 target_table_entry.client_id = 0;
9915 config->config_table[i].
9916 target_table_entry.vlan_id = 0;
9917
9918 DP(NETIF_MSG_IFUP,
9919 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9920 config->config_table[i].
9921 cam_entry.msb_mac_addr,
9922 config->config_table[i].
9923 cam_entry.middle_mac_addr,
9924 config->config_table[i].
9925 cam_entry.lsb_mac_addr);
9926 }
8d9c5f34 9927 old = config->hdr.length;
9928 if (old > i) {
9929 for (; i < old; i++) {
9930 if (CAM_IS_INVALID(config->
9931 config_table[i])) {
af246401 9932 /* already invalidated */
9933 break;
9934 }
9935 /* invalidate */
9936 CAM_INVALIDATE(config->
9937 config_table[i]);
9938 }
9939 }
9940
9941 if (CHIP_REV_IS_SLOW(bp))
9942 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9943 else
9944 offset = BNX2X_MAX_MULTICAST*(1 + port);
9945
8d9c5f34 9946 config->hdr.length = i;
34f80b04 9947 config->hdr.offset = offset;
8d9c5f34 9948 config->hdr.client_id = bp->fp->cl_id;
9949 config->hdr.reserved1 = 0;
9950
9951 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9952 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9953 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9954 0);
9955 } else { /* E1H */
9956 /* Accept one or more multicasts */
9957 struct dev_mc_list *mclist;
9958 u32 mc_filter[MC_HASH_SIZE];
9959 u32 crc, bit, regidx;
9960 int i;
9961
9962 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9963
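 /* Approximate multicast filtering: the top 8 bits of each
 address's CRC32c select one bit in the 256-bit MC hash
 register array (assuming MC_HASH_SIZE is 8 32-bit words). */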
9964 for (i = 0, mclist = dev->mc_list;
9965 mclist && (i < dev->mc_count);
9966 i++, mclist = mclist->next) {
9967
9968 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9969 mclist->dmi_addr);
9970
9971 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9972 bit = (crc >> 24) & 0xff;
9973 regidx = bit >> 5;
9974 bit &= 0x1f;
9975 mc_filter[regidx] |= (1 << bit);
9976 }
9977
9978 for (i = 0; i < MC_HASH_SIZE; i++)
9979 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9980 mc_filter[i]);
9981 }
9982 }
9983
9984 bp->rx_mode = rx_mode;
9985 bnx2x_set_storm_rx_mode(bp);
9986}
9987
9988/* called with rtnl_lock */
9989static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9990{
9991 struct sockaddr *addr = p;
9992 struct bnx2x *bp = netdev_priv(dev);
9993
34f80b04 9994 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9995 return -EINVAL;
9996
9997 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9998 if (netif_running(dev)) {
9999 if (CHIP_IS_E1(bp))
3101c2bc 10000 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 10001 else
3101c2bc 10002 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 10003 }
10004
10005 return 0;
10006}
10007
c18487ee 10008/* called with rtnl_lock */
10009static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10010{
10011 struct mii_ioctl_data *data = if_mii(ifr);
10012 struct bnx2x *bp = netdev_priv(dev);
3196a88a 10013 int port = BP_PORT(bp);
10014 int err;
10015
10016 switch (cmd) {
10017 case SIOCGMIIPHY:
34f80b04 10018 data->phy_id = bp->port.phy_addr;
a2fbb9ea 10019
c14423fe 10020 /* fallthrough */
c18487ee 10021
a2fbb9ea 10022 case SIOCGMIIREG: {
c18487ee 10023 u16 mii_regval;
a2fbb9ea 10024
10025 if (!netif_running(dev))
10026 return -EAGAIN;
a2fbb9ea 10027
34f80b04 10028 mutex_lock(&bp->port.phy_mutex);
3196a88a 10029 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10030 DEFAULT_PHY_DEV_ADDR,
10031 (data->reg_num & 0x1f), &mii_regval);
10032 data->val_out = mii_regval;
34f80b04 10033 mutex_unlock(&bp->port.phy_mutex);
10034 return err;
10035 }
10036
10037 case SIOCSMIIREG:
10038 if (!capable(CAP_NET_ADMIN))
10039 return -EPERM;
10040
10041 if (!netif_running(dev))
10042 return -EAGAIN;
10043
34f80b04 10044 mutex_lock(&bp->port.phy_mutex);
3196a88a 10045 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10046 DEFAULT_PHY_DEV_ADDR,
10047 (data->reg_num & 0x1f), data->val_in);
34f80b04 10048 mutex_unlock(&bp->port.phy_mutex);
10049 return err;
10050
10051 default:
10052 /* do nothing */
10053 break;
10054 }
10055
10056 return -EOPNOTSUPP;
10057}
10058
34f80b04 10059/* called with rtnl_lock */
10060static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10061{
10062 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10063 int rc = 0;
10064
10065 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10066 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10067 return -EINVAL;
10068
10069 /* This does not race with packet allocation
c14423fe 10070 * because the actual alloc size is
10071 * only updated as part of load
10072 */
10073 dev->mtu = new_mtu;
10074
10075 if (netif_running(dev)) {
10076 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10077 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10078 }
10079
10080 return rc;
10081}
10082
10083static void bnx2x_tx_timeout(struct net_device *dev)
10084{
10085 struct bnx2x *bp = netdev_priv(dev);
10086
10087#ifdef BNX2X_STOP_ON_ERROR
10088 if (!bp->panic)
10089 bnx2x_panic();
10090#endif
10091 /* This allows the netif to be shutdown gracefully before resetting */
10092 schedule_work(&bp->reset_task);
10093}
10094
10095#ifdef BCM_VLAN
34f80b04 10096/* called with rtnl_lock */
10097static void bnx2x_vlan_rx_register(struct net_device *dev,
10098 struct vlan_group *vlgrp)
10099{
10100 struct bnx2x *bp = netdev_priv(dev);
10101
10102 bp->vlgrp = vlgrp;
10103
10104 /* Set flags according to the required capabilities */
10105 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10106
10107 if (dev->features & NETIF_F_HW_VLAN_TX)
10108 bp->flags |= HW_VLAN_TX_FLAG;
10109
10110 if (dev->features & NETIF_F_HW_VLAN_RX)
10111 bp->flags |= HW_VLAN_RX_FLAG;
10112
a2fbb9ea 10113 if (netif_running(dev))
49d66772 10114 bnx2x_set_client_config(bp);
a2fbb9ea 10115}
34f80b04 10116
10117#endif
10118
10119#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10120static void poll_bnx2x(struct net_device *dev)
10121{
10122 struct bnx2x *bp = netdev_priv(dev);
10123
10124 disable_irq(bp->pdev->irq);
10125 bnx2x_interrupt(bp->pdev->irq, dev);
10126 enable_irq(bp->pdev->irq);
10127}
10128#endif
10129
10130static const struct net_device_ops bnx2x_netdev_ops = {
10131 .ndo_open = bnx2x_open,
10132 .ndo_stop = bnx2x_close,
10133 .ndo_start_xmit = bnx2x_start_xmit,
10134 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10135 .ndo_set_mac_address = bnx2x_change_mac_addr,
10136 .ndo_validate_addr = eth_validate_addr,
10137 .ndo_do_ioctl = bnx2x_ioctl,
10138 .ndo_change_mtu = bnx2x_change_mtu,
10139 .ndo_tx_timeout = bnx2x_tx_timeout,
10140#ifdef BCM_VLAN
10141 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10142#endif
10143#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10144 .ndo_poll_controller = poll_bnx2x,
10145#endif
10146};
10147
10148
10149static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10150 struct net_device *dev)
10151{
10152 struct bnx2x *bp;
10153 int rc;
10154
10155 SET_NETDEV_DEV(dev, &pdev->dev);
10156 bp = netdev_priv(dev);
10157
10158 bp->dev = dev;
10159 bp->pdev = pdev;
a2fbb9ea 10160 bp->flags = 0;
34f80b04 10161 bp->func = PCI_FUNC(pdev->devfn);
10162
10163 rc = pci_enable_device(pdev);
10164 if (rc) {
10165 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10166 goto err_out;
10167 }
10168
10169 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10170 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10171 " aborting\n");
10172 rc = -ENODEV;
10173 goto err_out_disable;
10174 }
10175
10176 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10177 printk(KERN_ERR PFX "Cannot find second PCI device"
10178 " base address, aborting\n");
10179 rc = -ENODEV;
10180 goto err_out_disable;
10181 }
10182
10183 if (atomic_read(&pdev->enable_cnt) == 1) {
10184 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10185 if (rc) {
10186 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10187 " aborting\n");
10188 goto err_out_disable;
10189 }
a2fbb9ea 10190
10191 pci_set_master(pdev);
10192 pci_save_state(pdev);
10193 }
10194
10195 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10196 if (bp->pm_cap == 0) {
10197 printk(KERN_ERR PFX "Cannot find power management"
10198 " capability, aborting\n");
10199 rc = -EIO;
10200 goto err_out_release;
10201 }
10202
10203 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10204 if (bp->pcie_cap == 0) {
10205 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10206 " aborting\n");
10207 rc = -EIO;
10208 goto err_out_release;
10209 }
10210
10211 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10212 bp->flags |= USING_DAC_FLAG;
10213 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10214 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10215 " failed, aborting\n");
10216 rc = -EIO;
10217 goto err_out_release;
10218 }
10219
10220 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10221 printk(KERN_ERR PFX "System does not support DMA,"
10222 " aborting\n");
10223 rc = -EIO;
10224 goto err_out_release;
10225 }
10226
10227 dev->mem_start = pci_resource_start(pdev, 0);
10228 dev->base_addr = dev->mem_start;
10229 dev->mem_end = pci_resource_end(pdev, 0);
10230
10231 dev->irq = pdev->irq;
10232
275f165f 10233 bp->regview = pci_ioremap_bar(pdev, 0);
10234 if (!bp->regview) {
10235 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10236 rc = -ENOMEM;
10237 goto err_out_release;
10238 }
10239
10240 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10241 min_t(u64, BNX2X_DB_SIZE,
10242 pci_resource_len(pdev, 2)));
10243 if (!bp->doorbells) {
10244 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10245 rc = -ENOMEM;
10246 goto err_out_unmap;
10247 }
10248
10249 bnx2x_set_power_state(bp, PCI_D0);
10250
10251 /* clean indirect addresses */
10252 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10253 PCICFG_VENDOR_ID_OFFSET);
10254 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10255 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10256 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10257 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10258
34f80b04 10259 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10260
c64213cd 10261 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 10262 dev->ethtool_ops = &bnx2x_ethtool_ops;
10263 dev->features |= NETIF_F_SG;
10264 dev->features |= NETIF_F_HW_CSUM;
10265 if (bp->flags & USING_DAC_FLAG)
10266 dev->features |= NETIF_F_HIGHDMA;
10267#ifdef BCM_VLAN
10268 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 10269 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10270#endif
10271 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10272 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
10273
10274 return 0;
10275
10276err_out_unmap:
10277 if (bp->regview) {
10278 iounmap(bp->regview);
10279 bp->regview = NULL;
10280 }
a2fbb9ea
ET
10281 if (bp->doorbells) {
10282 iounmap(bp->doorbells);
10283 bp->doorbells = NULL;
10284 }
10285
10286err_out_release:
34f80b04
EG
10287 if (atomic_read(&pdev->enable_cnt) == 1)
10288 pci_release_regions(pdev);
a2fbb9ea
ET
10289
10290err_out_disable:
10291 pci_disable_device(pdev);
10292 pci_set_drvdata(pdev, NULL);
10293
10294err_out:
10295 return rc;
10296}
10297
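/* The two helpers below decode the negotiated PCIe link width and speed
 * for the probe banner; the link-control/status word is read through
 * what appears to be a GRC mirror of config space (PCICFG_OFFSET)
 * rather than via config cycles.
 */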
25047950
ET
10298static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10299{
10300 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10301
10302 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10303 return val;
10304}
10305
10306/* returns 1 for a 2.5GHz (Gen1) link, 2 for a 5GHz (Gen2) link */
10307static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10308{
10309 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10310
10311 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10312 return val;
10313}
10314
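/* Probe entry point: allocate the net_device, map and set up the device
 * (bnx2x_init_dev), initialize driver state (bnx2x_init_bp) and register
 * with the network stack; failures after bnx2x_init_dev() unwind through
 * init_one_exit below.
 */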
a2fbb9ea
ET
10315static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10316 const struct pci_device_id *ent)
10317{
10318 static int version_printed;
10319 struct net_device *dev = NULL;
10320 struct bnx2x *bp;
25047950 10321 int rc;
a2fbb9ea
ET
10322
10323 if (version_printed++ == 0)
10324 printk(KERN_INFO "%s", version);
10325
10326	/* dev is zeroed by alloc_etherdev() */
10327 dev = alloc_etherdev(sizeof(*bp));
34f80b04
EG
10328 if (!dev) {
10329 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10330 return -ENOMEM;
34f80b04 10331 }
a2fbb9ea 10332
a2fbb9ea
ET
10333 bp = netdev_priv(dev);
10334 bp->msglevel = debug;
10335
34f80b04 10336 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
10337 if (rc < 0) {
10338 free_netdev(dev);
10339 return rc;
10340 }
10341
a2fbb9ea
ET
10342 pci_set_drvdata(pdev, dev);
10343
34f80b04 10344 rc = bnx2x_init_bp(bp);
693fc0d1
EG
10345 if (rc)
10346 goto init_one_exit;
10347
10348 rc = register_netdev(dev);
34f80b04 10349 if (rc) {
693fc0d1 10350 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
10351 goto init_one_exit;
10352 }
10353
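	/* Record the board name and print the probe banner: chip revision
	 * (letter/metal), negotiated PCIe width/speed, BAR 0 address, IRQ
	 * and MAC address.
	 */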
10354 bp->common.name = board_info[ent->driver_data].name;
25047950 10355 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
34f80b04
EG
10356 " IRQ %d, ", dev->name, bp->common.name,
10357 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
10358 bnx2x_get_pcie_width(bp),
10359 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10360 dev->base_addr, bp->pdev->irq);
e174961c 10361 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
a2fbb9ea 10362 return 0;
34f80b04
EG
10363
10364init_one_exit:
10365 if (bp->regview)
10366 iounmap(bp->regview);
10367
10368 if (bp->doorbells)
10369 iounmap(bp->doorbells);
10370
10371 free_netdev(dev);
10372
10373 if (atomic_read(&pdev->enable_cnt) == 1)
10374 pci_release_regions(pdev);
10375
10376 pci_disable_device(pdev);
10377 pci_set_drvdata(pdev, NULL);
10378
10379 return rc;
a2fbb9ea
ET
10380}
10381
10382static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10383{
10384 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10385 struct bnx2x *bp;
10386
10387 if (!dev) {
228241eb
ET
10388 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10389 return;
10390 }
228241eb 10391 bp = netdev_priv(dev);
a2fbb9ea 10392
a2fbb9ea
ET
10393 unregister_netdev(dev);
10394
10395 if (bp->regview)
10396 iounmap(bp->regview);
10397
10398 if (bp->doorbells)
10399 iounmap(bp->doorbells);
10400
10401 free_netdev(dev);
34f80b04
EG
10402
10403 if (atomic_read(&pdev->enable_cnt) == 1)
10404 pci_release_regions(pdev);
10405
a2fbb9ea
ET
10406 pci_disable_device(pdev);
10407 pci_set_drvdata(pdev, NULL);
10408}
10409
10410static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10411{
10412 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10413 struct bnx2x *bp;
10414
34f80b04
EG
10415 if (!dev) {
10416 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10417 return -ENODEV;
10418 }
10419 bp = netdev_priv(dev);
a2fbb9ea 10420
34f80b04 10421 rtnl_lock();
a2fbb9ea 10422
34f80b04 10423 pci_save_state(pdev);
228241eb 10424
34f80b04
EG
10425 if (!netif_running(dev)) {
10426 rtnl_unlock();
10427 return 0;
10428 }
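	/* Quiesce in order: detach from the stack so no new transmissions
	 * arrive, unload the NIC, then enter the PM state chosen by the
	 * core.
	 */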
a2fbb9ea
ET
10429
10430 netif_device_detach(dev);
a2fbb9ea 10431
da5a662a 10432 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 10433
a2fbb9ea 10434 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 10435
34f80b04
EG
10436 rtnl_unlock();
10437
a2fbb9ea
ET
10438 return 0;
10439}
10440
10441static int bnx2x_resume(struct pci_dev *pdev)
10442{
10443 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 10444 struct bnx2x *bp;
a2fbb9ea
ET
10445 int rc;
10446
228241eb
ET
10447 if (!dev) {
10448 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10449 return -ENODEV;
10450 }
228241eb 10451 bp = netdev_priv(dev);
a2fbb9ea 10452
34f80b04
EG
10453 rtnl_lock();
10454
228241eb 10455 pci_restore_state(pdev);
34f80b04
EG
10456
10457 if (!netif_running(dev)) {
10458 rtnl_unlock();
10459 return 0;
10460 }
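	/* Mirror of bnx2x_suspend(): return to D0, reattach to the stack
	 * and reload the NIC.
	 */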
10461
a2fbb9ea
ET
10462 bnx2x_set_power_state(bp, PCI_D0);
10463 netif_device_attach(dev);
10464
da5a662a 10465 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 10466
34f80b04
EG
10467 rtnl_unlock();
10468
10469 return rc;
a2fbb9ea
ET
10470}
10471
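/* Stripped-down unload for the EEH path: after a PCI channel error the
 * device may no longer be accessible, so driver state (timer, IRQs,
 * SKBs, memory) is torn down without the firmware handshake of a normal
 * bnx2x_nic_unload().
 */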
f8ef6e44
YG
10472static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10473{
10474 int i;
10475
10476 bp->state = BNX2X_STATE_ERROR;
10477
10478 bp->rx_mode = BNX2X_RX_MODE_NONE;
10479
10480 bnx2x_netif_stop(bp, 0);
10481
10482 del_timer_sync(&bp->timer);
10483 bp->stats_state = STATS_STATE_DISABLED;
10484 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10485
10486 /* Release IRQs */
10487 bnx2x_free_irq(bp);
10488
10489 if (CHIP_IS_E1(bp)) {
10490 struct mac_configuration_cmd *config =
10491 bnx2x_sp(bp, mcast_config);
10492
8d9c5f34 10493 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
10494 CAM_INVALIDATE(config->config_table[i]);
10495 }
10496
10497 /* Free SKBs, SGEs, TPA pool and driver internals */
10498 bnx2x_free_skbs(bp);
10499 for_each_queue(bp, i)
10500 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7cde1c8b
EG
10501 for_each_queue(bp, i)
10502 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
10503 bnx2x_free_mem(bp);
10504
10505 bp->state = BNX2X_STATE_CLOSED;
10506
10507 netif_carrier_off(bp->dev);
10508
10509 return 0;
10510}
10511
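/* After a slot reset, re-discover the shared memory (shmem) region and
 * resync the driver/firmware mailbox sequence number; if shmem looks
 * invalid, the MCP is assumed dead and NO_MCP_FLAG is set.
 */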
10512static void bnx2x_eeh_recover(struct bnx2x *bp)
10513{
10514 u32 val;
10515
10516 mutex_init(&bp->port.phy_mutex);
10517
10518 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10519 bp->link_params.shmem_base = bp->common.shmem_base;
10520 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10521
10522 if (!bp->common.shmem_base ||
10523 (bp->common.shmem_base < 0xA0000) ||
10524 (bp->common.shmem_base >= 0xC0000)) {
10525 BNX2X_DEV_INFO("MCP not active\n");
10526 bp->flags |= NO_MCP_FLAG;
10527 return;
10528 }
10529
10530 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10531 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10532 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10533 BNX2X_ERR("BAD MCP validity signature\n");
10534
10535 if (!BP_NOMCP(bp)) {
10536 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10537 & DRV_MSG_SEQ_NUMBER_MASK);
10538 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10539 }
10540}
10541
493adb1f
WX
10542/**
10543 * bnx2x_io_error_detected - called when PCI error is detected
10544 * @pdev: Pointer to PCI device
10545 * @state: The current pci connection state
10546 *
10547 * This function is called after a PCI bus error affecting
10548 * this device has been detected.
10549 */
10550static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10551 pci_channel_state_t state)
10552{
10553 struct net_device *dev = pci_get_drvdata(pdev);
10554 struct bnx2x *bp = netdev_priv(dev);
10555
10556 rtnl_lock();
10557
10558 netif_device_detach(dev);
10559
10560 if (netif_running(dev))
f8ef6e44 10561 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
10562
10563 pci_disable_device(pdev);
10564
10565 rtnl_unlock();
10566
10567 /* Request a slot reset */
10568 return PCI_ERS_RESULT_NEED_RESET;
10569}
10570
10571/**
10572 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10573 * @pdev: Pointer to PCI device
10574 *
10575 * Restart the card from scratch, as if from a cold boot.
10576 */
10577static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10578{
10579 struct net_device *dev = pci_get_drvdata(pdev);
10580 struct bnx2x *bp = netdev_priv(dev);
10581
10582 rtnl_lock();
10583
10584 if (pci_enable_device(pdev)) {
10585 dev_err(&pdev->dev,
10586 "Cannot re-enable PCI device after reset\n");
10587 rtnl_unlock();
10588 return PCI_ERS_RESULT_DISCONNECT;
10589 }
10590
10591 pci_set_master(pdev);
10592 pci_restore_state(pdev);
10593
10594 if (netif_running(dev))
10595 bnx2x_set_power_state(bp, PCI_D0);
10596
10597 rtnl_unlock();
10598
10599 return PCI_ERS_RESULT_RECOVERED;
10600}
10601
10602/**
10603 * bnx2x_io_resume - called when traffic can start flowing again
10604 * @pdev: Pointer to PCI device
10605 *
10606 * This callback is called when the error recovery driver tells us that
10607 * it's OK to resume normal operation.
10608 */
10609static void bnx2x_io_resume(struct pci_dev *pdev)
10610{
10611 struct net_device *dev = pci_get_drvdata(pdev);
10612 struct bnx2x *bp = netdev_priv(dev);
10613
10614 rtnl_lock();
10615
f8ef6e44
YG
10616 bnx2x_eeh_recover(bp);
10617
493adb1f 10618 if (netif_running(dev))
f8ef6e44 10619 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
10620
10621 netif_device_attach(dev);
10622
10623 rtnl_unlock();
10624}
10625
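/* EEH flow: error_detected() detaches and disables the device and
 * requests a slot reset; slot_reset() re-enables it and restores config
 * space; resume() re-reads shmem state and reloads the NIC.
 */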
10626static struct pci_error_handlers bnx2x_err_handler = {
10627 .error_detected = bnx2x_io_error_detected,
10628 .slot_reset = bnx2x_io_slot_reset,
10629 .resume = bnx2x_io_resume,
10630};
10631
a2fbb9ea 10632static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
10633 .name = DRV_MODULE_NAME,
10634 .id_table = bnx2x_pci_tbl,
10635 .probe = bnx2x_init_one,
10636 .remove = __devexit_p(bnx2x_remove_one),
10637 .suspend = bnx2x_suspend,
10638 .resume = bnx2x_resume,
10639 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
10640};
10641
10642static int __init bnx2x_init(void)
10643{
1cf167f2
EG
10644 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10645 if (bnx2x_wq == NULL) {
10646 printk(KERN_ERR PFX "Cannot create workqueue\n");
10647 return -ENOMEM;
10648 }
10649
a2fbb9ea
ET
10650 return pci_register_driver(&bnx2x_pci_driver);
10651}
10652
10653static void __exit bnx2x_cleanup(void)
10654{
10655 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
10656
10657 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
10658}
10659
10660module_init(bnx2x_init);
10661module_exit(bnx2x_cleanup);
10662