/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
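
/*
 * Note: the two helpers above reach chip registers indirectly through
 * PCI configuration space: the target GRC address is latched via
 * PCICFG_GRC_ADDRESS and the data moves through PCICFG_GRC_DATA.
 * Rewriting PCICFG_GRC_ADDRESS with PCICFG_VENDOR_ID_OFFSET afterwards
 * parks the window on a harmless offset.  This path is used only at
 * init time and as the fallback while bp->dmae_ready is not yet set.
 */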

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
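
/*
 * The chip exposes 16 DMAE command slots; dmae_reg_go_c[] maps a slot
 * index to its "GO" doorbell register.  bnx2x_post_dmae() copies the
 * command image word by word into the command memory and then rings
 * the doorbell for that slot.
 */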

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
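
/*
 * Illustrative use (hypothetical caller, not part of this file): to
 * copy four dwords staged in DMA-able host memory into GRC space one
 * would do
 *
 *	bnx2x_write_dmae(bp, mapping, dst_grc_addr, 4);
 *
 * where 'mapping' is a DMA address (e.g. from pci_map_single()) and
 * the length is counted in 32-bit words.  Completion is detected by
 * polling the wb_comp slowpath word, which the DMAE engine writes
 * with DMAE_COMP_VAL when the transfer is done.
 */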

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
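
/*
 * Some chip registers are wider than 32 bits ("wide-bus" registers);
 * they are accessed as a hi/lo dword pair through the DMAE engine.
 * bnx2x_wb_wr() (and bnx2x_wb_rd() when USE_WB_RD is defined) wrap
 * that pattern for the slowpath.
 */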

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
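
/*
 * Each of the four on-chip STORM processors (X/T/C/U) keeps an assert
 * list in its internal memory; bnx2x_mc_assert() walks all four lists
 * and returns the number of asserts found, so a non-zero return means
 * the firmware hit an internal error.
 */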

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
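
/*
 * The MCP firmware logs text into a circular buffer in its scratchpad
 * memory; the dword at offset 0xf104 is the current write mark.  The
 * two loops above therefore print the older half (from the mark to the
 * end of the buffer) and then the newer half (from the buffer start
 * back up to the mark), 32 bytes at a time, NUL-terminating each chunk.
 */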

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
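
/*
 * Acknowledging a status block tells the IGU how far the driver has
 * consumed status-block updates for a given storm, and the 'op' field
 * can at the same time disable or re-enable the interrupt for that
 * status block - this is how the IRQ/NAPI-poll handshake is
 * implemented on this hardware.
 */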

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
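
/*
 * A transmitted packet occupies a chain of buffer descriptors: the
 * first (data) BD carries the total BD count in 'nbd', while the
 * optional parse BD and TSO split-header BD carry no DMA mapping of
 * their own, which is why they are skipped above before the
 * per-fragment unmap loop runs.
 */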

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
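
/*
 * Recycling an rx buffer from the consumer slot back to the producer
 * slot (above) keeps the ring full without allocating: the skb, its
 * DMA mapping and the hardware BD are simply copied from 'cons' to
 * 'prod'.
 */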

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
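
/*
 * The sge_mask is a bitmap with one bit per SGE ring entry: a set bit
 * means the entry is still available to the hardware.  Bits are
 * cleared as the firmware reports pages consumed, and whole 64-bit
 * mask elements are set back to all-ones as the producer advances, so
 * in effect the SGE producer moves forward in 64-entry strides.
 */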

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
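
/*
 * TPA (transparent packet aggregation) works in three stages:
 * bnx2x_tpa_start() parks the first buffer of an aggregation in a
 * per-queue "bin", bnx2x_fill_frag_skb() later attaches the SGE pages
 * that received the rest of the data, and bnx2x_tpa_stop() finalizes
 * the skb and hands it to the stack.
 */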

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take the VLAN tag into account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is the size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
						pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
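
/*
 * bnx2x_rx_int() is the per-queue NAPI poll body: it walks the
 * completion queue up to 'budget' packets, dispatching slowpath CQEs
 * to bnx2x_sp_event(), TPA CQEs to the start/stop handlers, and plain
 * packets to the stack, then publishes the new BD/CQE/SGE producers
 * to the firmware in one shot via bnx2x_update_rx_prod().
 */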

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
c18487ee
YR
1732/* Link */
1733
1734/*
1735 * General service functions
1736 */
a2fbb9ea 1737
4a37fb66 1738static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1739{
1740 u32 lock_status;
1741 u32 resource_bit = (1 << resource);
4a37fb66
YG
1742 int func = BP_FUNC(bp);
1743 u32 hw_lock_control_reg;
c18487ee 1744 int cnt;
a2fbb9ea 1745
c18487ee
YR
1746 /* Validating that the resource is within range */
1747 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1748 DP(NETIF_MSG_HW,
1749 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1750 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1751 return -EINVAL;
1752 }
a2fbb9ea 1753
4a37fb66
YG
1754 if (func <= 5) {
1755 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1756 } else {
1757 hw_lock_control_reg =
1758 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1759 }
1760
c18487ee 1761 /* Validating that the resource is not already taken */
4a37fb66 1762 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1763 if (lock_status & resource_bit) {
1764 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1765 lock_status, resource_bit);
1766 return -EEXIST;
1767 }
a2fbb9ea 1768
46230476
EG
 1769 /* Try for 5 seconds, polling every 5ms */
1770 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1771 /* Try to acquire the lock */
4a37fb66
YG
1772 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1773 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1774 if (lock_status & resource_bit)
1775 return 0;
a2fbb9ea 1776
c18487ee 1777 msleep(5);
a2fbb9ea 1778 }
c18487ee
YR
1779 DP(NETIF_MSG_HW, "Timeout\n");
1780 return -EAGAIN;
1781}
a2fbb9ea 1782
4a37fb66 1783static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1784{
1785 u32 lock_status;
1786 u32 resource_bit = (1 << resource);
4a37fb66
YG
1787 int func = BP_FUNC(bp);
1788 u32 hw_lock_control_reg;
a2fbb9ea 1789
c18487ee
YR
1790 /* Validating that the resource is within range */
1791 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1792 DP(NETIF_MSG_HW,
1793 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1794 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1795 return -EINVAL;
1796 }
1797
4a37fb66
YG
1798 if (func <= 5) {
1799 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1800 } else {
1801 hw_lock_control_reg =
1802 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1803 }
1804
c18487ee 1805 /* Validating that the resource is currently taken */
4a37fb66 1806 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1807 if (!(lock_status & resource_bit)) {
1808 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1809 lock_status, resource_bit);
1810 return -EFAULT;
a2fbb9ea
ET
1811 }
1812
4a37fb66 1813 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1814 return 0;
1815}
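/* Illustrative call pattern (a sketch, not new driver logic; see
 * bnx2x_set_gpio() below for a real instance):
 *
 *	rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	if (rc)		(-EINVAL, -EEXIST or -EAGAIN)
 *		return rc;
 *	... access the shared register (MISC_REG_GPIO) ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */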
1816
1817/* HW Lock for shared dual port PHYs */
4a37fb66 1818static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee
YR
1819{
1820 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1821
34f80b04 1822 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1823
c18487ee
YR
1824 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1825 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1826 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
c18487ee 1827}
a2fbb9ea 1828
4a37fb66 1829static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee
YR
1830{
1831 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1832
c18487ee
YR
1833 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1834 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1835 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1836
34f80b04 1837 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1838}
a2fbb9ea 1839
17de50b7 1840int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1841{
1842 /* The GPIO should be swapped if swap register is set and active */
1843 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1844 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1845 int gpio_shift = gpio_num +
1846 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1847 u32 gpio_mask = (1 << gpio_shift);
1848 u32 gpio_reg;
a2fbb9ea 1849
c18487ee
YR
1850 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1851 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1852 return -EINVAL;
1853 }
a2fbb9ea 1854
4a37fb66 1855 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
 1856 /* read GPIO and mask everything except the FLOAT bits */
1857 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1858
c18487ee
YR
1859 switch (mode) {
1860 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1861 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1862 gpio_num, gpio_shift);
1863 /* clear FLOAT and set CLR */
1864 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1865 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1866 break;
a2fbb9ea 1867
c18487ee
YR
1868 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1869 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1870 gpio_num, gpio_shift);
1871 /* clear FLOAT and set SET */
1872 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1873 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1874 break;
a2fbb9ea 1875
17de50b7 1876 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1877 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1878 gpio_num, gpio_shift);
1879 /* set FLOAT */
1880 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1881 break;
a2fbb9ea 1882
c18487ee
YR
1883 default:
1884 break;
a2fbb9ea
ET
1885 }
1886
c18487ee 1887 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1888 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1889
c18487ee 1890 return 0;
a2fbb9ea
ET
1891}
1892
c18487ee 1893static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1894{
c18487ee
YR
1895 u32 spio_mask = (1 << spio_num);
1896 u32 spio_reg;
a2fbb9ea 1897
c18487ee
YR
1898 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1899 (spio_num > MISC_REGISTERS_SPIO_7)) {
1900 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1901 return -EINVAL;
a2fbb9ea
ET
1902 }
1903
4a37fb66 1904 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
 1905 /* read SPIO and mask everything except the FLOAT bits */
1906 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1907
c18487ee 1908 switch (mode) {
6378c025 1909 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1910 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1911 /* clear FLOAT and set CLR */
1912 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1913 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1914 break;
a2fbb9ea 1915
6378c025 1916 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1917 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1918 /* clear FLOAT and set SET */
1919 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1920 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1921 break;
a2fbb9ea 1922
c18487ee
YR
1923 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1924 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1925 /* set FLOAT */
1926 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1927 break;
a2fbb9ea 1928
c18487ee
YR
1929 default:
1930 break;
a2fbb9ea
ET
1931 }
1932
c18487ee 1933 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1934 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1935
a2fbb9ea
ET
1936 return 0;
1937}
1938
c18487ee 1939static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1940{
ad33ea3a
EG
1941 switch (bp->link_vars.ieee_fc &
1942 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1943 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1944 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1945 ADVERTISED_Pause);
1946 break;
1947 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1948 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1949 ADVERTISED_Pause);
1950 break;
1951 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1952 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
1953 break;
1954 default:
34f80b04 1955 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1956 ADVERTISED_Pause);
1957 break;
1958 }
1959}
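/* For quick reference, the mapping implemented above resolves the
 * driver's IEEE pause mode into ethtool advertising bits as:
 *	PAUSE_NONE       -> neither ADVERTISED_Pause nor _Asym_Pause
 *	PAUSE_BOTH       -> ADVERTISED_Pause | ADVERTISED_Asym_Pause
 *	PAUSE_ASYMMETRIC -> ADVERTISED_Asym_Pause only
 */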
f1410647 1960
c18487ee
YR
1961static void bnx2x_link_report(struct bnx2x *bp)
1962{
1963 if (bp->link_vars.link_up) {
1964 if (bp->state == BNX2X_STATE_OPEN)
1965 netif_carrier_on(bp->dev);
1966 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1967
c18487ee 1968 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1969
c18487ee
YR
1970 if (bp->link_vars.duplex == DUPLEX_FULL)
1971 printk("full duplex");
1972 else
1973 printk("half duplex");
f1410647 1974
c0700f90
DM
1975 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1976 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 1977 printk(", receive ");
c0700f90 1978 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
c18487ee
YR
1979 printk("& transmit ");
1980 } else {
1981 printk(", transmit ");
1982 }
1983 printk("flow control ON");
1984 }
1985 printk("\n");
f1410647 1986
c18487ee
YR
1987 } else { /* link_down */
1988 netif_carrier_off(bp->dev);
1989 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1990 }
c18487ee
YR
1991}
1992
1993static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1994{
19680c48
EG
1995 if (!BP_NOMCP(bp)) {
1996 u8 rc;
a2fbb9ea 1997
19680c48 1998 /* Initialize link parameters structure variables */
8c99e7b0
YR
1999 /* It is recommended to turn off RX FC for jumbo frames
2000 for better performance */
2001 if (IS_E1HMF(bp))
c0700f90 2002 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2003 else if (bp->dev->mtu > 5000)
c0700f90 2004 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2005 else
c0700f90 2006 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2007
4a37fb66 2008 bnx2x_acquire_phy_lock(bp);
19680c48 2009 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2010 bnx2x_release_phy_lock(bp);
a2fbb9ea 2011
3c96c68b
EG
2012 bnx2x_calc_fc_adv(bp);
2013
19680c48
EG
2014 if (bp->link_vars.link_up)
2015 bnx2x_link_report(bp);
a2fbb9ea 2016
34f80b04 2017
19680c48
EG
2018 return rc;
2019 }
 2020 BNX2X_ERR("Bootcode is missing - not initializing link\n");
2021 return -EINVAL;
a2fbb9ea
ET
2022}
2023
c18487ee 2024static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2025{
19680c48 2026 if (!BP_NOMCP(bp)) {
4a37fb66 2027 bnx2x_acquire_phy_lock(bp);
19680c48 2028 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2029 bnx2x_release_phy_lock(bp);
a2fbb9ea 2030
19680c48
EG
2031 bnx2x_calc_fc_adv(bp);
2032 } else
 2033 BNX2X_ERR("Bootcode is missing - not setting link\n");
c18487ee 2034}
a2fbb9ea 2035
c18487ee
YR
2036static void bnx2x__link_reset(struct bnx2x *bp)
2037{
19680c48 2038 if (!BP_NOMCP(bp)) {
4a37fb66 2039 bnx2x_acquire_phy_lock(bp);
19680c48 2040 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
4a37fb66 2041 bnx2x_release_phy_lock(bp);
19680c48
EG
2042 } else
 2043 BNX2X_ERR("Bootcode is missing - not resetting link\n");
c18487ee 2044}
a2fbb9ea 2045
c18487ee
YR
2046static u8 bnx2x_link_test(struct bnx2x *bp)
2047{
2048 u8 rc;
a2fbb9ea 2049
4a37fb66 2050 bnx2x_acquire_phy_lock(bp);
c18487ee 2051 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2052 bnx2x_release_phy_lock(bp);
a2fbb9ea 2053
c18487ee
YR
2054 return rc;
2055}
a2fbb9ea 2056
34f80b04
EG
2057/* Calculates the sum of vn_min_rates.
2058 It's needed for further normalizing of the min_rates.
2059
2060 Returns:
2061 sum of vn_min_rates
2062 or
2063 0 - if all the min_rates are 0.
33471629 2064 In the latter case the fairness algorithm should be deactivated.
34f80b04
EG
2065 If not all min_rates are zero then those that are zeroes will
2066 be set to 1.
2067 */
2068static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2069{
2070 int i, port = BP_PORT(bp);
2071 u32 wsum = 0;
2072 int all_zero = 1;
2073
2074 for (i = 0; i < E1HVN_MAX; i++) {
2075 u32 vn_cfg =
2076 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2077 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2078 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2079 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2080 /* If min rate is zero - set it to 1 */
2081 if (!vn_min_rate)
2082 vn_min_rate = DEF_MIN_RATE;
2083 else
2084 all_zero = 0;
2085
2086 wsum += vn_min_rate;
2087 }
2088 }
2089
2090 /* ... only if all min rates are zeros - disable FAIRNESS */
2091 if (all_zero)
2092 return 0;
2093
2094 return wsum;
2095}
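/* Worked example (illustrative numbers): with four non-hidden vn-s
 * whose configured min BW fields are 0, 25, 0 and 50, the scaled rates
 * are DEF_MIN_RATE, 2500, DEF_MIN_RATE and 5000 (the field is
 * multiplied by 100 above); assuming DEF_MIN_RATE is 100, the function
 * returns wsum = 100 + 2500 + 100 + 5000 = 7700.  Only the all-zero
 * configuration returns 0 and deactivates fairness.
 */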
2096
2097static void bnx2x_init_port_minmax(struct bnx2x *bp,
2098 int en_fness,
2099 u16 port_rate,
2100 struct cmng_struct_per_port *m_cmng_port)
2101{
2102 u32 r_param = port_rate / 8;
2103 int port = BP_PORT(bp);
2104 int i;
2105
2106 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2107
2108 /* Enable minmax only if we are in e1hmf mode */
2109 if (IS_E1HMF(bp)) {
2110 u32 fair_periodic_timeout_usec;
2111 u32 t_fair;
2112
2113 /* Enable rate shaping and fairness */
2114 m_cmng_port->flags.cmng_vn_enable = 1;
2115 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2116 m_cmng_port->flags.rate_shaping_enable = 1;
2117
2118 if (!en_fness)
 2119 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
 2120 " fairness will be disabled\n");
2121
2122 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2123 m_cmng_port->rs_vars.rs_periodic_timeout =
2124 RS_PERIODIC_TIMEOUT_USEC / 4;
2125
 2126 /* this is the threshold below which no timer arming will occur;
 2127 the 1.25 coefficient makes the threshold a little bigger than
 2128 the real time, to compensate for timer inaccuracy */
2129 m_cmng_port->rs_vars.rs_threshold =
2130 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2131
2132 /* resolution of fairness timer */
2133 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2134 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2135 t_fair = T_FAIR_COEF / port_rate;
2136
2137 /* this is the threshold below which we won't arm
2138 the timer anymore */
2139 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2140
 2141 /* we multiply by 1e3/8 to get bytes/msec.
 2142 We don't want the credits to exceed
 2143 T_FAIR*FAIR_MEM (the algorithm resolution) */
2144 m_cmng_port->fair_vars.upper_bound =
2145 r_param * t_fair * FAIR_MEM;
2146 /* since each tick is 4 usec */
2147 m_cmng_port->fair_vars.fairness_timeout =
2148 fair_periodic_timeout_usec / 4;
2149
2150 } else {
2151 /* Disable rate shaping and fairness */
2152 m_cmng_port->flags.cmng_vn_enable = 0;
2153 m_cmng_port->flags.fairness_enable = 0;
2154 m_cmng_port->flags.rate_shaping_enable = 0;
2155
2156 DP(NETIF_MSG_IFUP,
2157 "Single function mode minmax will be disabled\n");
2158 }
2159
2160 /* Store it to internal memory */
2161 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2162 REG_WR(bp, BAR_XSTRORM_INTMEM +
2163 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2164 ((u32 *)(m_cmng_port))[i]);
2165}
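/* Worked example (illustrative, tying the formulas above together):
 * on a 10000 Mbps link r_param = 10000/8 = 1250 bytes/usec, so
 * rs_threshold is the byte credit of 1.25 rate-shaping periods
 * (RS_PERIODIC_TIMEOUT_USEC * 1250 * 5/4), and t_fair =
 * T_FAIR_COEF/10000, the 1000 usec quoted in the comment above.
 */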
2166
2167static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2168 u32 wsum, u16 port_rate,
2169 struct cmng_struct_per_port *m_cmng_port)
2170{
2171 struct rate_shaping_vars_per_vn m_rs_vn;
2172 struct fairness_vars_per_vn m_fair_vn;
2173 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2174 u16 vn_min_rate, vn_max_rate;
2175 int i;
2176
2177 /* If function is hidden - set min and max to zeroes */
2178 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2179 vn_min_rate = 0;
2180 vn_max_rate = 0;
2181
2182 } else {
2183 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2184 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2185 /* If FAIRNESS is enabled (not all min rates are zeroes) and
 2186 the current min rate is zero - set it to 1.
33471629 2187 This is a requirement of the algorithm. */
34f80b04
EG
2188 if ((vn_min_rate == 0) && wsum)
2189 vn_min_rate = DEF_MIN_RATE;
2190 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2191 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2192 }
2193
2194 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2195 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2196
2197 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2198 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2199
2200 /* global vn counter - maximal Mbps for this vn */
2201 m_rs_vn.vn_counter.rate = vn_max_rate;
2202
2203 /* quota - number of bytes transmitted in this period */
2204 m_rs_vn.vn_counter.quota =
2205 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2206
2207#ifdef BNX2X_PER_PROT_QOS
2208 /* per protocol counter */
2209 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2210 /* maximal Mbps for this protocol */
2211 m_rs_vn.protocol_counters[protocol].rate =
2212 protocol_max_rate[protocol];
2213 /* the quota in each timer period -
2214 number of bytes transmitted in this period */
2215 m_rs_vn.protocol_counters[protocol].quota =
2216 (u32)(rs_periodic_timeout_usec *
2217 ((double)m_rs_vn.
2218 protocol_counters[protocol].rate/8));
2219 }
2220#endif
2221
2222 if (wsum) {
 2223 /* credit for each period of the fairness algorithm:
 2224 number of bytes in T_FAIR (the vn-s share the port rate).
 2225 wsum should not be larger than 10000, thus
 2226 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2227 m_fair_vn.vn_credit_delta =
2228 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2229 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2230 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2231 m_fair_vn.vn_credit_delta);
2232 }
2233
2234#ifdef BNX2X_PER_PROT_QOS
2235 do {
2236 u32 protocolWeightSum = 0;
2237
2238 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2239 protocolWeightSum +=
2240 drvInit.protocol_min_rate[protocol];
2241 /* per protocol counter -
2242 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2243 if (protocolWeightSum > 0) {
2244 for (protocol = 0;
2245 protocol < NUM_OF_PROTOCOLS; protocol++)
2246 /* credit for each period of the
2247 fairness algorithm - number of bytes in
 2248 T_FAIR (the protocols share the vn rate) */
2249 m_fair_vn.protocol_credit_delta[protocol] =
2250 (u32)((vn_min_rate / 8) * t_fair *
2251 protocol_min_rate / protocolWeightSum);
2252 }
2253 } while (0);
2254#endif
2255
2256 /* Store it to internal memory */
2257 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2258 REG_WR(bp, BAR_XSTRORM_INTMEM +
2259 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2260 ((u32 *)(&m_rs_vn))[i]);
2261
2262 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2263 REG_WR(bp, BAR_XSTRORM_INTMEM +
2264 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2265 ((u32 *)(&m_fair_vn))[i]);
2266}
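/* Worked example (illustrative numbers, continuing the wsum example
 * above): with wsum = 7700, a vn whose vn_min_rate is 2500 gets
 * vn_credit_delta = 2500 * T_FAIR_COEF/(8 * 7700), i.e. its 2500/7700
 * share of r_param * t_fair = T_FAIR_COEF/8, the bytes the port can
 * move in one T_FAIR period, unless the max() clamps it up to twice
 * the fairness threshold.
 */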
2267
c18487ee
YR
2268/* This function is called upon link interrupt */
2269static void bnx2x_link_attn(struct bnx2x *bp)
2270{
34f80b04
EG
2271 int vn;
2272
bb2a0f7a
YG
2273 /* Make sure that we are synced with the current statistics */
2274 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2275
c18487ee 2276 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2277
bb2a0f7a
YG
2278 if (bp->link_vars.link_up) {
2279
2280 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2281 struct host_port_stats *pstats;
2282
2283 pstats = bnx2x_sp(bp, port_stats);
2284 /* reset old bmac stats */
2285 memset(&(pstats->mac_stx[0]), 0,
2286 sizeof(struct mac_stx));
2287 }
2288 if ((bp->state == BNX2X_STATE_OPEN) ||
2289 (bp->state == BNX2X_STATE_DISABLED))
2290 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2291 }
2292
c18487ee
YR
2293 /* indicate link status */
2294 bnx2x_link_report(bp);
34f80b04
EG
2295
2296 if (IS_E1HMF(bp)) {
2297 int func;
2298
2299 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2300 if (vn == BP_E1HVN(bp))
2301 continue;
2302
2303 func = ((vn << 1) | BP_PORT(bp));
2304
2305 /* Set the attention towards other drivers
2306 on the same port */
2307 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2308 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2309 }
2310 }
2311
2312 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2313 struct cmng_struct_per_port m_cmng_port;
2314 u32 wsum;
2315 int port = BP_PORT(bp);
2316
2317 /* Init RATE SHAPING and FAIRNESS contexts */
2318 wsum = bnx2x_calc_vn_wsum(bp);
2319 bnx2x_init_port_minmax(bp, (int)wsum,
2320 bp->link_vars.line_speed,
2321 &m_cmng_port);
2322 if (IS_E1HMF(bp))
2323 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2324 bnx2x_init_vn_minmax(bp, 2*vn + port,
2325 wsum, bp->link_vars.line_speed,
2326 &m_cmng_port);
2327 }
c18487ee 2328}
a2fbb9ea 2329
c18487ee
YR
2330static void bnx2x__link_status_update(struct bnx2x *bp)
2331{
2332 if (bp->state != BNX2X_STATE_OPEN)
2333 return;
a2fbb9ea 2334
c18487ee 2335 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2336
bb2a0f7a
YG
2337 if (bp->link_vars.link_up)
2338 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2339 else
2340 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2341
c18487ee
YR
2342 /* indicate link status */
2343 bnx2x_link_report(bp);
a2fbb9ea 2344}
a2fbb9ea 2345
34f80b04
EG
2346static void bnx2x_pmf_update(struct bnx2x *bp)
2347{
2348 int port = BP_PORT(bp);
2349 u32 val;
2350
2351 bp->port.pmf = 1;
2352 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2353
2354 /* enable nig attention */
2355 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2356 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2357 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2358
2359 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2360}
2361
c18487ee 2362/* end of Link */
a2fbb9ea
ET
2363
2364/* slow path */
2365
2366/*
2367 * General service functions
2368 */
2369
2370/* the slow path queue is odd since completions arrive on the fastpath ring */
2371static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2372 u32 data_hi, u32 data_lo, int common)
2373{
34f80b04 2374 int func = BP_FUNC(bp);
a2fbb9ea 2375
34f80b04
EG
2376 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2377 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2378 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2379 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2380 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2381
2382#ifdef BNX2X_STOP_ON_ERROR
2383 if (unlikely(bp->panic))
2384 return -EIO;
2385#endif
2386
34f80b04 2387 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2388
2389 if (!bp->spq_left) {
2390 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2391 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2392 bnx2x_panic();
2393 return -EBUSY;
2394 }
f1410647 2395
a2fbb9ea
ET
 2396 /* CID needs the port number to be encoded in it */
2397 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2398 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2399 HW_CID(bp, cid)));
2400 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2401 if (common)
2402 bp->spq_prod_bd->hdr.type |=
2403 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2404
2405 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2406 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2407
2408 bp->spq_left--;
2409
2410 if (bp->spq_prod_bd == bp->spq_last_bd) {
2411 bp->spq_prod_bd = bp->spq;
2412 bp->spq_prod_idx = 0;
2413 DP(NETIF_MSG_TIMER, "end of spq\n");
2414
2415 } else {
2416 bp->spq_prod_bd++;
2417 bp->spq_prod_idx++;
2418 }
2419
34f80b04 2420 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2421 bp->spq_prod_idx);
2422
34f80b04 2423 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2424 return 0;
2425}
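/* Illustrative call (taken from bnx2x_storm_stats_post() further down
 * in this file): posting a statistics ramrod on connection 0, with the
 * ramrod data split into hi/lo dwords and 'common' left clear:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 */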
2426
2427/* acquire split MCP access lock register */
4a37fb66 2428static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2429{
a2fbb9ea 2430 u32 i, j, val;
34f80b04 2431 int rc = 0;
a2fbb9ea
ET
2432
2433 might_sleep();
2434 i = 100;
2435 for (j = 0; j < i*10; j++) {
2436 val = (1UL << 31);
2437 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2438 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2439 if (val & (1L << 31))
2440 break;
2441
2442 msleep(5);
2443 }
a2fbb9ea 2444 if (!(val & (1L << 31))) {
19680c48 2445 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2446 rc = -EBUSY;
2447 }
2448
2449 return rc;
2450}
2451
4a37fb66
YG
2452/* release split MCP access lock register */
2453static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2454{
2455 u32 val = 0;
2456
2457 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2458}
2459
2460static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2461{
2462 struct host_def_status_block *def_sb = bp->def_status_blk;
2463 u16 rc = 0;
2464
2465 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2466 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2467 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2468 rc |= 1;
2469 }
2470 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2471 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2472 rc |= 2;
2473 }
2474 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2475 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2476 rc |= 4;
2477 }
2478 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2479 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2480 rc |= 8;
2481 }
2482 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2483 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2484 rc |= 16;
2485 }
2486 return rc;
2487}
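/* For reference (derived from the checks above), the returned bitmask
 * encodes which default status block indices have moved:
 *	bit 0 - attention bits index
 *	bit 1 - CSTORM index
 *	bit 2 - USTORM index
 *	bit 3 - XSTORM index
 *	bit 4 - TSTORM index
 * bnx2x_sp_task() only acts on bit 0 but acks all of them.
 */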
2488
2489/*
2490 * slow path service functions
2491 */
2492
2493static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2494{
34f80b04 2495 int port = BP_PORT(bp);
5c862848
EG
2496 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2497 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2498 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2499 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2500 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2501 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2502 u32 aeu_mask;
a2fbb9ea 2503
a2fbb9ea
ET
2504 if (bp->attn_state & asserted)
2505 BNX2X_ERR("IGU ERROR\n");
2506
3fcaf2e5
EG
2507 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2508 aeu_mask = REG_RD(bp, aeu_addr);
2509
a2fbb9ea 2510 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2511 aeu_mask, asserted);
2512 aeu_mask &= ~(asserted & 0xff);
2513 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2514
3fcaf2e5
EG
2515 REG_WR(bp, aeu_addr, aeu_mask);
2516 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2517
3fcaf2e5 2518 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2519 bp->attn_state |= asserted;
3fcaf2e5 2520 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2521
2522 if (asserted & ATTN_HARD_WIRED_MASK) {
2523 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2524
a5e9a7cf
EG
2525 bnx2x_acquire_phy_lock(bp);
2526
877e9aa4
ET
2527 /* save nig interrupt mask */
2528 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2529 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2530
c18487ee 2531 bnx2x_link_attn(bp);
a2fbb9ea
ET
2532
2533 /* handle unicore attn? */
2534 }
2535 if (asserted & ATTN_SW_TIMER_4_FUNC)
2536 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2537
2538 if (asserted & GPIO_2_FUNC)
2539 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2540
2541 if (asserted & GPIO_3_FUNC)
2542 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2543
2544 if (asserted & GPIO_4_FUNC)
2545 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2546
2547 if (port == 0) {
2548 if (asserted & ATTN_GENERAL_ATTN_1) {
2549 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2550 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2551 }
2552 if (asserted & ATTN_GENERAL_ATTN_2) {
2553 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2554 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2555 }
2556 if (asserted & ATTN_GENERAL_ATTN_3) {
2557 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2558 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2559 }
2560 } else {
2561 if (asserted & ATTN_GENERAL_ATTN_4) {
2562 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2563 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2564 }
2565 if (asserted & ATTN_GENERAL_ATTN_5) {
2566 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2567 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2568 }
2569 if (asserted & ATTN_GENERAL_ATTN_6) {
2570 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2571 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2572 }
2573 }
2574
2575 } /* if hardwired */
2576
5c862848
EG
2577 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2578 asserted, hc_addr);
2579 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2580
2581 /* now set back the mask */
a5e9a7cf 2582 if (asserted & ATTN_NIG_FOR_FUNC) {
877e9aa4 2583 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
a5e9a7cf
EG
2584 bnx2x_release_phy_lock(bp);
2585 }
a2fbb9ea
ET
2586}
2587
877e9aa4 2588static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2589{
34f80b04 2590 int port = BP_PORT(bp);
877e9aa4
ET
2591 int reg_offset;
2592 u32 val;
2593
34f80b04
EG
2594 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2595 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2596
34f80b04 2597 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2598
2599 val = REG_RD(bp, reg_offset);
2600 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2601 REG_WR(bp, reg_offset, val);
2602
2603 BNX2X_ERR("SPIO5 hw attention\n");
2604
34f80b04 2605 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 2606 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
877e9aa4
ET
2607 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2608 /* Fan failure attention */
2609
17de50b7 2610 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2611 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2612 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2613 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2614 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2615 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2616 /* mark the failure */
c18487ee 2617 bp->link_params.ext_phy_config &=
877e9aa4 2618 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2619 bp->link_params.ext_phy_config |=
877e9aa4
ET
2620 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2621 SHMEM_WR(bp,
2622 dev_info.port_hw_config[port].
2623 external_phy_config,
c18487ee 2624 bp->link_params.ext_phy_config);
877e9aa4
ET
2625 /* log the failure */
2626 printk(KERN_ERR PFX "Fan Failure on Network"
2627 " Controller %s has caused the driver to"
2628 " shutdown the card to prevent permanent"
2629 " damage. Please contact Dell Support for"
2630 " assistance\n", bp->dev->name);
2631 break;
2632
2633 default:
2634 break;
2635 }
2636 }
34f80b04
EG
2637
2638 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2639
2640 val = REG_RD(bp, reg_offset);
2641 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2642 REG_WR(bp, reg_offset, val);
2643
2644 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2645 (attn & HW_INTERRUT_ASSERT_SET_0));
2646 bnx2x_panic();
2647 }
877e9aa4
ET
2648}
2649
2650static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2651{
2652 u32 val;
2653
2654 if (attn & BNX2X_DOORQ_ASSERT) {
2655
2656 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2657 BNX2X_ERR("DB hw attention 0x%x\n", val);
2658 /* DORQ discard attention */
2659 if (val & 0x2)
2660 BNX2X_ERR("FATAL error from DORQ\n");
2661 }
34f80b04
EG
2662
2663 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2664
2665 int port = BP_PORT(bp);
2666 int reg_offset;
2667
2668 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2669 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2670
2671 val = REG_RD(bp, reg_offset);
2672 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2673 REG_WR(bp, reg_offset, val);
2674
2675 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2676 (attn & HW_INTERRUT_ASSERT_SET_1));
2677 bnx2x_panic();
2678 }
877e9aa4
ET
2679}
2680
2681static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2682{
2683 u32 val;
2684
2685 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2686
2687 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2688 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2689 /* CFC error attention */
2690 if (val & 0x2)
2691 BNX2X_ERR("FATAL error from CFC\n");
2692 }
2693
2694 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2695
2696 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2697 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2698 /* RQ_USDMDP_FIFO_OVERFLOW */
2699 if (val & 0x18000)
2700 BNX2X_ERR("FATAL error from PXP\n");
2701 }
34f80b04
EG
2702
2703 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2704
2705 int port = BP_PORT(bp);
2706 int reg_offset;
2707
2708 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2709 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2710
2711 val = REG_RD(bp, reg_offset);
2712 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2713 REG_WR(bp, reg_offset, val);
2714
2715 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2716 (attn & HW_INTERRUT_ASSERT_SET_2));
2717 bnx2x_panic();
2718 }
877e9aa4
ET
2719}
2720
2721static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2722{
34f80b04
EG
2723 u32 val;
2724
877e9aa4
ET
2725 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2726
34f80b04
EG
2727 if (attn & BNX2X_PMF_LINK_ASSERT) {
2728 int func = BP_FUNC(bp);
2729
2730 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2731 bnx2x__link_status_update(bp);
2732 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2733 DRV_STATUS_PMF)
2734 bnx2x_pmf_update(bp);
2735
2736 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2737
2738 BNX2X_ERR("MC assert!\n");
2739 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2740 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2741 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2742 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2743 bnx2x_panic();
2744
2745 } else if (attn & BNX2X_MCP_ASSERT) {
2746
2747 BNX2X_ERR("MCP assert!\n");
2748 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2749 bnx2x_fw_dump(bp);
877e9aa4
ET
2750
2751 } else
2752 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2753 }
2754
2755 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2756 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2757 if (attn & BNX2X_GRC_TIMEOUT) {
2758 val = CHIP_IS_E1H(bp) ?
2759 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2760 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2761 }
2762 if (attn & BNX2X_GRC_RSV) {
2763 val = CHIP_IS_E1H(bp) ?
2764 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2765 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2766 }
877e9aa4 2767 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2768 }
2769}
2770
2771static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2772{
a2fbb9ea
ET
2773 struct attn_route attn;
2774 struct attn_route group_mask;
34f80b04 2775 int port = BP_PORT(bp);
877e9aa4 2776 int index;
a2fbb9ea
ET
2777 u32 reg_addr;
2778 u32 val;
3fcaf2e5 2779 u32 aeu_mask;
a2fbb9ea
ET
2780
2781 /* need to take HW lock because MCP or other port might also
2782 try to handle this event */
4a37fb66 2783 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2784
2785 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2786 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2787 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2788 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2789 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2790 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2791
2792 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2793 if (deasserted & (1 << index)) {
2794 group_mask = bp->attn_group[index];
2795
34f80b04
EG
2796 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2797 index, group_mask.sig[0], group_mask.sig[1],
2798 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2799
877e9aa4
ET
2800 bnx2x_attn_int_deasserted3(bp,
2801 attn.sig[3] & group_mask.sig[3]);
2802 bnx2x_attn_int_deasserted1(bp,
2803 attn.sig[1] & group_mask.sig[1]);
2804 bnx2x_attn_int_deasserted2(bp,
2805 attn.sig[2] & group_mask.sig[2]);
2806 bnx2x_attn_int_deasserted0(bp,
2807 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2808
a2fbb9ea
ET
2809 if ((attn.sig[0] & group_mask.sig[0] &
2810 HW_PRTY_ASSERT_SET_0) ||
2811 (attn.sig[1] & group_mask.sig[1] &
2812 HW_PRTY_ASSERT_SET_1) ||
2813 (attn.sig[2] & group_mask.sig[2] &
2814 HW_PRTY_ASSERT_SET_2))
6378c025 2815 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2816 }
2817 }
2818
4a37fb66 2819 bnx2x_release_alr(bp);
a2fbb9ea 2820
5c862848 2821 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2822
2823 val = ~deasserted;
3fcaf2e5
EG
2824 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2825 val, reg_addr);
5c862848 2826 REG_WR(bp, reg_addr, val);
a2fbb9ea 2827
a2fbb9ea 2828 if (~bp->attn_state & deasserted)
3fcaf2e5 2829 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2830
2831 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2832 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2833
3fcaf2e5
EG
2834 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2835 aeu_mask = REG_RD(bp, reg_addr);
2836
2837 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2838 aeu_mask, deasserted);
2839 aeu_mask |= (deasserted & 0xff);
2840 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2841
3fcaf2e5
EG
2842 REG_WR(bp, reg_addr, aeu_mask);
2843 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2844
2845 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2846 bp->attn_state &= ~deasserted;
2847 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2848}
2849
2850static void bnx2x_attn_int(struct bnx2x *bp)
2851{
2852 /* read local copy of bits */
68d59484
EG
2853 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2854 attn_bits);
2855 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2856 attn_bits_ack);
a2fbb9ea
ET
2857 u32 attn_state = bp->attn_state;
2858
2859 /* look for changed bits */
2860 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2861 u32 deasserted = ~attn_bits & attn_ack & attn_state;
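	/* Truth table for the two lines above: a bit is newly asserted
	 * when the chip raised it (attn_bits = 1) while it is neither
	 * acked nor tracked (attn_ack = attn_state = 0); it is newly
	 * deasserted when the chip dropped it (attn_bits = 0) while it
	 * is still acked and tracked (attn_ack = attn_state = 1).  All
	 * other combinations are steady state or flagged as BAD below.
	 */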
2862
2863 DP(NETIF_MSG_HW,
2864 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2865 attn_bits, attn_ack, asserted, deasserted);
2866
2867 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2868 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2869
2870 /* handle bits that were raised */
2871 if (asserted)
2872 bnx2x_attn_int_asserted(bp, asserted);
2873
2874 if (deasserted)
2875 bnx2x_attn_int_deasserted(bp, deasserted);
2876}
2877
2878static void bnx2x_sp_task(struct work_struct *work)
2879{
1cf167f2 2880 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2881 u16 status;
2882
34f80b04 2883
a2fbb9ea
ET
2884 /* Return here if interrupt is disabled */
2885 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2886 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2887 return;
2888 }
2889
2890 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2891/* if (status == 0) */
2892/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2893
3196a88a 2894 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2895
877e9aa4
ET
2896 /* HW attentions */
2897 if (status & 0x1)
a2fbb9ea 2898 bnx2x_attn_int(bp);
a2fbb9ea 2899
68d59484 2900 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2901 IGU_INT_NOP, 1);
2902 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2903 IGU_INT_NOP, 1);
2904 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2905 IGU_INT_NOP, 1);
2906 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2907 IGU_INT_NOP, 1);
2908 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2909 IGU_INT_ENABLE, 1);
877e9aa4 2910
a2fbb9ea
ET
2911}
2912
2913static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2914{
2915 struct net_device *dev = dev_instance;
2916 struct bnx2x *bp = netdev_priv(dev);
2917
2918 /* Return here if interrupt is disabled */
2919 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2920 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2921 return IRQ_HANDLED;
2922 }
2923
8d9c5f34 2924 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2925
2926#ifdef BNX2X_STOP_ON_ERROR
2927 if (unlikely(bp->panic))
2928 return IRQ_HANDLED;
2929#endif
2930
1cf167f2 2931 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2932
2933 return IRQ_HANDLED;
2934}
2935
2936/* end of slow path */
2937
2938/* Statistics */
2939
2940/****************************************************************************
2941* Macros
2942****************************************************************************/
2943
a2fbb9ea
ET
2944/* sum[hi:lo] += add[hi:lo] */
2945#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2946 do { \
2947 s_lo += a_lo; \
f5ba6772 2948 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
2949 } while (0)
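/* Worked example: ADD_64(s_hi, 0, s_lo, 1) with s_hi = 0 and
 * s_lo = 0xffffffff wraps s_lo to 0; the (s_lo < a_lo) test then
 * supplies the carry, leaving s_hi:s_lo = 0x00000001:00000000. */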
2950
2951/* difference = minuend - subtrahend */
2952#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2953 do { \
bb2a0f7a
YG
2954 if (m_lo < s_lo) { \
2955 /* underflow */ \
a2fbb9ea 2956 d_hi = m_hi - s_hi; \
bb2a0f7a 2957 if (d_hi > 0) { \
6378c025 2958 /* we can 'loan' 1 */ \
a2fbb9ea
ET
2959 d_hi--; \
2960 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2961 } else { \
6378c025 2962 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2963 d_hi = 0; \
2964 d_lo = 0; \
2965 } \
bb2a0f7a
YG
2966 } else { \
2967 /* m_lo >= s_lo */ \
a2fbb9ea 2968 if (m_hi < s_hi) { \
bb2a0f7a
YG
2969 d_hi = 0; \
2970 d_lo = 0; \
2971 } else { \
6378c025 2972 /* m_hi >= s_hi */ \
bb2a0f7a
YG
2973 d_hi = m_hi - s_hi; \
2974 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2975 } \
2976 } \
2977 } while (0)
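/* Worked example: DIFF_64 on minuend 0x00000001:00000000 and
 * subtrahend 0x00000000:00000001 takes the underflow branch
 * (m_lo < s_lo), borrows one from d_hi and yields
 * d_hi:d_lo = 0x00000000:ffffffff.  If the subtrahend is the larger
 * value the result is clamped to zero rather than wrapping. */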
2978
bb2a0f7a 2979#define UPDATE_STAT64(s, t) \
a2fbb9ea 2980 do { \
bb2a0f7a
YG
2981 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2982 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2983 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2984 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2985 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2986 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2987 } while (0)
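/* Illustrative expansion (see the calls in bnx2x_bmac_stats_update()
 * below): UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets)
 * DIFF_64-s the freshly DMAed MAC counter against the last snapshot
 * in mac_stx[0], stores the new value as the snapshot and ADD_64-s
 * the delta into the running total in mac_stx[1]. */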
2988
bb2a0f7a 2989#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2990 do { \
bb2a0f7a
YG
2991 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2992 diff.lo, new->s##_lo, old->s##_lo); \
2993 ADD_64(estats->t##_hi, diff.hi, \
2994 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2995 } while (0)
2996
2997/* sum[hi:lo] += add */
2998#define ADD_EXTEND_64(s_hi, s_lo, a) \
2999 do { \
3000 s_lo += a; \
3001 s_hi += (s_lo < a) ? 1 : 0; \
3002 } while (0)
3003
bb2a0f7a 3004#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3005 do { \
bb2a0f7a
YG
3006 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3007 pstats->mac_stx[1].s##_lo, \
3008 new->s); \
a2fbb9ea
ET
3009 } while (0)
3010
bb2a0f7a 3011#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
3012 do { \
3013 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3014 old_tclient->s = le32_to_cpu(tclient->s); \
de832a55
EG
3015 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3016 } while (0)
3017
3018#define UPDATE_EXTEND_USTAT(s, t) \
3019 do { \
3020 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3021 old_uclient->s = uclient->s; \
3022 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3023 } while (0)
3024
3025#define UPDATE_EXTEND_XSTAT(s, t) \
3026 do { \
3027 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3028 old_xclient->s = le32_to_cpu(xclient->s); \
de832a55
EG
3029 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3030 } while (0)
3031
3032/* minuend -= subtrahend */
3033#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3034 do { \
3035 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3036 } while (0)
3037
3038/* minuend[hi:lo] -= subtrahend */
3039#define SUB_EXTEND_64(m_hi, m_lo, s) \
3040 do { \
3041 SUB_64(m_hi, 0, m_lo, s); \
3042 } while (0)
3043
3044#define SUB_EXTEND_USTAT(s, t) \
3045 do { \
3046 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3047 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3048 } while (0)
3049
3050/*
3051 * General service functions
3052 */
3053
3054static inline long bnx2x_hilo(u32 *hiref)
3055{
3056 u32 lo = *(hiref + 1);
3057#if (BITS_PER_LONG == 64)
3058 u32 hi = *hiref;
3059
3060 return HILO_U64(hi, lo);
3061#else
3062 return lo;
3063#endif
3064}
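/* Example (follows directly from the #if above): for *hiref =
 * 0x00000001 and *(hiref + 1) = 0x00000002, bnx2x_hilo() returns the
 * combined value 0x100000002 on a 64-bit kernel, but only the low
 * dword (2) on a 32-bit one, where large counters simply truncate. */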
3065
3066/*
3067 * Init service functions
3068 */
3069
bb2a0f7a
YG
3070static void bnx2x_storm_stats_post(struct bnx2x *bp)
3071{
3072 if (!bp->stats_pending) {
3073 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3074 int i, rc;
bb2a0f7a
YG
3075
3076 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3077 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3078 for_each_queue(bp, i)
3079 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3080
3081 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3082 ((u32 *)&ramrod_data)[1],
3083 ((u32 *)&ramrod_data)[0], 0);
3084 if (rc == 0) {
 3085 /* stats ramrod has its own slot on the spq */
3086 bp->spq_left++;
3087 bp->stats_pending = 1;
3088 }
3089 }
3090}
3091
3092static void bnx2x_stats_init(struct bnx2x *bp)
3093{
3094 int port = BP_PORT(bp);
de832a55 3095 int i;
bb2a0f7a 3096
de832a55 3097 bp->stats_pending = 0;
bb2a0f7a
YG
3098 bp->executer_idx = 0;
3099 bp->stats_counter = 0;
3100
3101 /* port stats */
3102 if (!BP_NOMCP(bp))
3103 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3104 else
3105 bp->port.port_stx = 0;
3106 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3107
3108 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3109 bp->port.old_nig_stats.brb_discard =
3110 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3111 bp->port.old_nig_stats.brb_truncate =
3112 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3113 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3114 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3115 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3116 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3117
3118 /* function stats */
de832a55
EG
3119 for_each_queue(bp, i) {
3120 struct bnx2x_fastpath *fp = &bp->fp[i];
3121
3122 memset(&fp->old_tclient, 0,
3123 sizeof(struct tstorm_per_client_stats));
3124 memset(&fp->old_uclient, 0,
3125 sizeof(struct ustorm_per_client_stats));
3126 memset(&fp->old_xclient, 0,
3127 sizeof(struct xstorm_per_client_stats));
3128 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3129 }
3130
bb2a0f7a 3131 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
bb2a0f7a
YG
3132 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3133
3134 bp->stats_state = STATS_STATE_DISABLED;
3135 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3136 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3137}
3138
3139static void bnx2x_hw_stats_post(struct bnx2x *bp)
3140{
3141 struct dmae_command *dmae = &bp->stats_dmae;
3142 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3143
3144 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3145 if (CHIP_REV_IS_SLOW(bp))
3146 return;
bb2a0f7a
YG
3147
3148 /* loader */
3149 if (bp->executer_idx) {
3150 int loader_idx = PMF_DMAE_C(bp);
3151
3152 memset(dmae, 0, sizeof(struct dmae_command));
3153
3154 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3155 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3156 DMAE_CMD_DST_RESET |
3157#ifdef __BIG_ENDIAN
3158 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3159#else
3160 DMAE_CMD_ENDIANITY_DW_SWAP |
3161#endif
3162 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3163 DMAE_CMD_PORT_0) |
3164 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3165 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3166 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3167 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3168 sizeof(struct dmae_command) *
3169 (loader_idx + 1)) >> 2;
3170 dmae->dst_addr_hi = 0;
3171 dmae->len = sizeof(struct dmae_command) >> 2;
3172 if (CHIP_IS_E1(bp))
3173 dmae->len--;
3174 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3175 dmae->comp_addr_hi = 0;
3176 dmae->comp_val = 1;
3177
3178 *stats_comp = 0;
3179 bnx2x_post_dmae(bp, dmae, loader_idx);
3180
3181 } else if (bp->func_stx) {
3182 *stats_comp = 0;
3183 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3184 }
3185}
3186
3187static int bnx2x_stats_comp(struct bnx2x *bp)
3188{
3189 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3190 int cnt = 10;
3191
3192 might_sleep();
3193 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3194 if (!cnt) {
3195 BNX2X_ERR("timeout waiting for stats finished\n");
3196 break;
3197 }
3198 cnt--;
12469401 3199 msleep(1);
bb2a0f7a
YG
3200 }
3201 return 1;
3202}
3203
3204/*
3205 * Statistics service functions
3206 */
3207
3208static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3209{
3210 struct dmae_command *dmae;
3211 u32 opcode;
3212 int loader_idx = PMF_DMAE_C(bp);
3213 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3214
3215 /* sanity */
3216 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3217 BNX2X_ERR("BUG!\n");
3218 return;
3219 }
3220
3221 bp->executer_idx = 0;
3222
3223 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3224 DMAE_CMD_C_ENABLE |
3225 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3226#ifdef __BIG_ENDIAN
3227 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3228#else
3229 DMAE_CMD_ENDIANITY_DW_SWAP |
3230#endif
3231 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3232 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3233
3234 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3235 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3236 dmae->src_addr_lo = bp->port.port_stx >> 2;
3237 dmae->src_addr_hi = 0;
3238 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3239 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3240 dmae->len = DMAE_LEN32_RD_MAX;
3241 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3242 dmae->comp_addr_hi = 0;
3243 dmae->comp_val = 1;
3244
3245 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3246 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3247 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3248 dmae->src_addr_hi = 0;
7a9b2557
VZ
3249 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3250 DMAE_LEN32_RD_MAX * 4);
3251 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3252 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3253 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3254 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3255 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3256 dmae->comp_val = DMAE_COMP_VAL;
3257
3258 *stats_comp = 0;
3259 bnx2x_hw_stats_post(bp);
3260 bnx2x_stats_comp(bp);
3261}
3262
3263static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3264{
3265 struct dmae_command *dmae;
34f80b04 3266 int port = BP_PORT(bp);
bb2a0f7a 3267 int vn = BP_E1HVN(bp);
a2fbb9ea 3268 u32 opcode;
bb2a0f7a 3269 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3270 u32 mac_addr;
bb2a0f7a
YG
3271 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3272
3273 /* sanity */
3274 if (!bp->link_vars.link_up || !bp->port.pmf) {
3275 BNX2X_ERR("BUG!\n");
3276 return;
3277 }
a2fbb9ea
ET
3278
3279 bp->executer_idx = 0;
bb2a0f7a
YG
3280
3281 /* MCP */
3282 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3283 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3284 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3285#ifdef __BIG_ENDIAN
bb2a0f7a 3286 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3287#else
bb2a0f7a 3288 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3289#endif
bb2a0f7a
YG
3290 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3291 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3292
bb2a0f7a 3293 if (bp->port.port_stx) {
a2fbb9ea
ET
3294
3295 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3296 dmae->opcode = opcode;
bb2a0f7a
YG
3297 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3298 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3299 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3300 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3301 dmae->len = sizeof(struct host_port_stats) >> 2;
3302 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3303 dmae->comp_addr_hi = 0;
3304 dmae->comp_val = 1;
a2fbb9ea
ET
3305 }
3306
bb2a0f7a
YG
3307 if (bp->func_stx) {
3308
3309 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3310 dmae->opcode = opcode;
3311 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3312 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3313 dmae->dst_addr_lo = bp->func_stx >> 2;
3314 dmae->dst_addr_hi = 0;
3315 dmae->len = sizeof(struct host_func_stats) >> 2;
3316 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3317 dmae->comp_addr_hi = 0;
3318 dmae->comp_val = 1;
a2fbb9ea
ET
3319 }
3320
bb2a0f7a 3321 /* MAC */
a2fbb9ea
ET
3322 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3323 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3324 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3325#ifdef __BIG_ENDIAN
3326 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3327#else
3328 DMAE_CMD_ENDIANITY_DW_SWAP |
3329#endif
bb2a0f7a
YG
3330 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3331 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3332
c18487ee 3333 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3334
3335 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3336 NIG_REG_INGRESS_BMAC0_MEM);
3337
3338 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3339 BIGMAC_REGISTER_TX_STAT_GTBYT */
3340 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3341 dmae->opcode = opcode;
3342 dmae->src_addr_lo = (mac_addr +
3343 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3344 dmae->src_addr_hi = 0;
3345 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3346 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3347 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3348 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3349 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3350 dmae->comp_addr_hi = 0;
3351 dmae->comp_val = 1;
3352
3353 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3354 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3355 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3356 dmae->opcode = opcode;
3357 dmae->src_addr_lo = (mac_addr +
3358 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3359 dmae->src_addr_hi = 0;
3360 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

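/*
 * Note on the DMAE chain built above: the intermediate commands complete
 * by writing 1 to a DMAE "GO" register (dmae_reg_go_c[loader_idx]), which
 * keeps the sequence running in hardware without host involvement; only
 * the last command posts DMAE_COMP_VAL to the stats_comp word in host
 * memory, which bnx2x_stats_comp() polls to detect that the whole
 * transfer finished.
 */
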
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

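/*
 * UPDATE_STAT64 (a macro defined earlier in this file) copes with the
 * BMAC counters being free-running: roughly, it computes the delta
 * between the freshly DMAE'd value in 'new' and the previous snapshot
 * kept in pstats->mac_stx[0], stores the new snapshot, and accumulates
 * the delta into pstats->mac_stx[1] with the 64-bit hi/lo helpers
 * (hence the local 'diff' regpair).
 */
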
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

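/*
 * The NIG counters are only 32 bits wide, so bnx2x_hw_stats_update()
 * extends them: ADD_EXTEND_64 adds the 32-bit delta (new - old) into a
 * 64-bit hi/lo pair, and the raw NIG block is then memcpy'd into 'old'
 * so the next run uses the current values as its baseline.
 */
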
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
		qstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
		qstats->total_bytes_received_lo =
		qstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

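/*
 * The 'stats_counter' fields are the consistency handshake with the
 * firmware: bnx2x_storm_stats_post() increments bp->stats_counter with
 * each statistics query it posts, and every storm stamps the counter of
 * the query it last served into its snapshot.  A client snapshot is
 * trusted only if counter + 1 == bp->stats_counter, i.e. it answers the
 * most recent query; otherwise the whole update is abandoned and retried
 * on the next timer tick rather than mixing stale and fresh numbers.
 */
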
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       old_tclient->checksum_discard,
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       old_tclient->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

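/*
 * The statistics code is a two-state machine: each event indexes
 * bnx2x_stats_stm[] by the current state, runs the action and moves to
 * next_state.  For example, a STATS_EVENT_UPDATE (raised from
 * bnx2x_timer()) while ENABLED runs bnx2x_stats_update() and stays
 * ENABLED, while the same event in DISABLED is a no-op.
 */
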
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

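/*
 * bnx2x_init_sb() publishes the two halves of a fastpath status block:
 * the USTORM section (which carries the Rx indices) and the CSTORM
 * section (the Tx indices).  Writing 1 to every SB_HC_DISABLE index
 * masks host-coalescing updates until bnx2x_update_coalesce() programs
 * real timeouts, and the final bnx2x_ack_sb(..., IGU_INT_ENABLE, ...)
 * arms the IGU for this sb_id.
 */
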
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

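/*
 * The default status block differs from the fastpath ones: besides one
 * section per storm it carries the attention status block, and the AEU
 * signal masks for all MAX_DYNAMIC_ATTN_GRPS groups are cached into
 * bp->attn_group[] here so that attention handling does not have to
 * re-read them from the MISC block at interrupt time.
 */
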
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

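/*
 * rx_ticks/tx_ticks are interrupt coalescing intervals in microseconds;
 * the division by 12 converts them into host-coalescing timer ticks,
 * which appear to be 12 us each on this hardware.  A value of 0 cannot
 * be expressed as a timeout, so it is implemented by setting the
 * corresponding SB_HC_DISABLE flag instead, i.e. no coalescing at all
 * on that index.
 */
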
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

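/*
 * Ring geometry note: each ring is a chain of BCM_PAGE_SIZE pages, and
 * the loops above reserve the tail of every page for a "next page"
 * pointer - the final two entries of each RX BD/SGE page are reserved
 * (one carries the DMA address of the following page), and the last
 * entry of each RCQ page is a next-page CQE.  That is why the producers
 * advance via NEXT_RX_IDX()/NEXT_SGE_IDX()/NEXT_RCQ_IDX(), which skip
 * over these reserved slots.
 */
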
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

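/*
 * The slow-path queue (SPQ) carries ramrods - configuration commands
 * such as client setup/halt and statistics queries - to the firmware.
 * Its page address and initial producer are published to the XSTORM
 * here; bnx2x_sp_post() later advances spq_prod_bd/spq_prod_idx under
 * spq_lock and rings the same XSTORM_SPQ_PROD_OFFSET doorbell.
 */
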
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = FP_SB_ID(fp);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			BP_CL_ID(bp) + (i % bp->num_rx_queues));
}

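/*
 * The indirection table spreads RSS hash results across the Rx queues:
 * bucket i is mapped to client id BP_CL_ID(bp) + (i % num_rx_queues),
 * a plain round-robin over this function's Rx queues.  When multi_mode
 * is ETH_RSS_MODE_DISABLED all traffic stays on the leading client and
 * the table is left untouched.
 */
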
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

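/*
 * 'mask' selects this function's bit in the per-port filter config:
 * each *_drop_all/*_accept_all field of tstorm_eth_mac_filter_config is
 * a bit mask over the logical clients sharing the port, so setting or
 * clearing (1 << BP_L_ID(bp)) changes the Rx mode of this function
 * without disturbing the other functions on the same port (E1H
 * multi-function mode).
 */
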
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

4851static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4852{
a2fbb9ea
ET
4853 struct tstorm_eth_function_common_config tstorm_config = {0};
4854 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4855 int port = BP_PORT(bp);
4856 int func = BP_FUNC(bp);
de832a55
EG
4857 int i, j;
4858 u32 offset;
471de716 4859 u16 max_agg_size;
a2fbb9ea
ET
4860
4861 if (is_multi(bp)) {
555f6c78 4862 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4863 tstorm_config.rss_result_mask = MULTI_MASK;
4864 }
8d9c5f34
EG
4865 if (IS_E1HMF(bp))
4866 tstorm_config.config_flags |=
4867 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4868
34f80b04
EG
4869 tstorm_config.leading_client_id = BP_L_ID(bp);
4870
a2fbb9ea 4871 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4872 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4873 (*(u32 *)&tstorm_config));
4874
c14423fe 4875 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4876 bnx2x_set_storm_rx_mode(bp);
4877
de832a55
EG
4878 for_each_queue(bp, i) {
4879 u8 cl_id = bp->fp[i].cl_id;
4880
4881 /* reset xstorm per client statistics */
4882 offset = BAR_XSTRORM_INTMEM +
4883 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4884 for (j = 0;
4885 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4886 REG_WR(bp, offset + j*4, 0);
4887
4888 /* reset tstorm per client statistics */
4889 offset = BAR_TSTRORM_INTMEM +
4890 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4891 for (j = 0;
4892 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4893 REG_WR(bp, offset + j*4, 0);
4894
4895 /* reset ustorm per client statistics */
4896 offset = BAR_USTRORM_INTMEM +
4897 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4898 for (j = 0;
4899 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4900 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
4901 }
4902
4903 /* Init statistics related context */
34f80b04 4904 stats_flags.collect_eth = 1;
a2fbb9ea 4905
66e855f3 4906 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4907 ((u32 *)&stats_flags)[0]);
66e855f3 4908 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4909 ((u32 *)&stats_flags)[1]);
4910
66e855f3 4911 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4912 ((u32 *)&stats_flags)[0]);
66e855f3 4913 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4914 ((u32 *)&stats_flags)[1]);
4915
de832a55
EG
4916 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4917 ((u32 *)&stats_flags)[0]);
4918 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4919 ((u32 *)&stats_flags)[1]);
4920
66e855f3 4921 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4922 ((u32 *)&stats_flags)[0]);
66e855f3 4923 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4924 ((u32 *)&stats_flags)[1]);
4925
66e855f3
YG
4926 REG_WR(bp, BAR_XSTRORM_INTMEM +
4927 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4928 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4929 REG_WR(bp, BAR_XSTRORM_INTMEM +
4930 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4931 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4932
4933 REG_WR(bp, BAR_TSTRORM_INTMEM +
4934 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4935 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4936 REG_WR(bp, BAR_TSTRORM_INTMEM +
4937 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4938 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 4939
de832a55
EG
4940 REG_WR(bp, BAR_USTRORM_INTMEM +
4941 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4942 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4943 REG_WR(bp, BAR_USTRORM_INTMEM +
4944 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4945 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4946
4947 if (CHIP_IS_E1H(bp)) {
4948 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4949 IS_E1HMF(bp));
4950 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4951 IS_E1HMF(bp));
4952 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4953 IS_E1HMF(bp));
4954 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4955 IS_E1HMF(bp));
4956
4957 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4958 bp->e1hov);
4959 }
4960
4961 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4962 max_agg_size =
4963 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4964 SGE_PAGE_SIZE * PAGES_PER_SGE),
4965 (u32)0xffff);
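/* Editor's note -- a worked example of the clamp above, under the
 * illustrative assumption of 4 KiB SGE pages with PAGES_PER_SGE == 2:
 * min(8, MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE would be
 * 8 * 4096 * 2 = 65536, which overflows the u16 register written
 * below, so the outer min() against 0xffff caps the value at 65535.
 */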
555f6c78 4966 for_each_rx_queue(bp, i) {
7a9b2557 4967 struct bnx2x_fastpath *fp = &bp->fp[i];
4968
4969 REG_WR(bp, BAR_USTRORM_INTMEM +
4970 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4971 U64_LO(fp->rx_comp_mapping));
4972 REG_WR(bp, BAR_USTRORM_INTMEM +
4973 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4974 U64_HI(fp->rx_comp_mapping));
4975
4976 REG_WR16(bp, BAR_USTRORM_INTMEM +
4977 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4978 max_agg_size);
4979 }
4980}
4981
4982static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4983{
4984 switch (load_code) {
4985 case FW_MSG_CODE_DRV_LOAD_COMMON:
4986 bnx2x_init_internal_common(bp);
4987 /* no break */
4988
4989 case FW_MSG_CODE_DRV_LOAD_PORT:
4990 bnx2x_init_internal_port(bp);
4991 /* no break */
4992
4993 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4994 bnx2x_init_internal_func(bp);
4995 break;
4996
4997 default:
4998 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4999 break;
5000 }
5001}
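/* Editor's note: the omitted breaks above are deliberate -- a COMMON
 * load falls through to the PORT and FUNCTION stages, and a PORT load
 * falls through to the FUNCTION stage, so each load_code runs its own
 * init plus every narrower one.
 */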
5002
5003static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5004{
5005 int i;
5006
5007 for_each_queue(bp, i) {
5008 struct bnx2x_fastpath *fp = &bp->fp[i];
5009
34f80b04 5010 fp->bp = bp;
a2fbb9ea 5011 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5012 fp->index = i;
5013 fp->cl_id = BP_L_ID(bp) + i;
5014 fp->sb_id = fp->cl_id;
5015 DP(NETIF_MSG_IFUP,
5016 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
5017 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5018 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5019 FP_SB_ID(fp));
5020 bnx2x_update_fpsb_idx(fp);
5021 }
5022
5023 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5024 DEF_SB_ID);
5025 bnx2x_update_dsb_idx(bp);
5026 bnx2x_update_coalesce(bp);
5027 bnx2x_init_rx_rings(bp);
5028 bnx2x_init_tx_ring(bp);
5029 bnx2x_init_sp_ring(bp);
5030 bnx2x_init_context(bp);
471de716 5031 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5032 bnx2x_init_ind_table(bp);
5033 bnx2x_stats_init(bp);
5034
5035 /* At this point, we are ready for interrupts */
5036 atomic_set(&bp->intr_sem, 0);
5037
5038 /* flush all before enabling interrupts */
5039 mb();
5040 mmiowb();
5041
615f8fd9 5042 bnx2x_int_enable(bp);
5043}
5044
5045/* end of nic init */
5046
5047/*
5048 * gzip service functions
5049 */
5050
5051static int bnx2x_gunzip_init(struct bnx2x *bp)
5052{
5053 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5054 &bp->gunzip_mapping);
5055 if (bp->gunzip_buf == NULL)
5056 goto gunzip_nomem1;
5057
5058 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5059 if (bp->strm == NULL)
5060 goto gunzip_nomem2;
5061
5062 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5063 GFP_KERNEL);
5064 if (bp->strm->workspace == NULL)
5065 goto gunzip_nomem3;
5066
5067 return 0;
5068
5069gunzip_nomem3:
5070 kfree(bp->strm);
5071 bp->strm = NULL;
5072
5073gunzip_nomem2:
5074 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5075 bp->gunzip_mapping);
5076 bp->gunzip_buf = NULL;
5077
5078gunzip_nomem1:
5079 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5080 " decompression\n", bp->dev->name);
5081 return -ENOMEM;
5082}
5083
5084static void bnx2x_gunzip_end(struct bnx2x *bp)
5085{
5086 kfree(bp->strm->workspace);
5087
5088 kfree(bp->strm);
5089 bp->strm = NULL;
5090
5091 if (bp->gunzip_buf) {
5092 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5093 bp->gunzip_mapping);
5094 bp->gunzip_buf = NULL;
5095 }
5096}
5097
5098static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5099{
5100 int n, rc;
5101
5102 /* check gzip header */
5103 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5104 return -EINVAL;
5105
5106 n = 10;
5107
34f80b04 5108#define FNAME 0x8
5109
5110 if (zbuf[3] & FNAME)
5111 while ((zbuf[n++] != 0) && (n < len));
5112
5113 bp->strm->next_in = zbuf + n;
5114 bp->strm->avail_in = len - n;
5115 bp->strm->next_out = bp->gunzip_buf;
5116 bp->strm->avail_out = FW_BUF_SIZE;
5117
5118 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5119 if (rc != Z_OK)
5120 return rc;
5121
5122 rc = zlib_inflate(bp->strm, Z_FINISH);
5123 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5124 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5125 bp->dev->name, bp->strm->msg);
5126
5127 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5128 if (bp->gunzip_outlen & 0x3)
5129 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5130 " gunzip_outlen (%d) not aligned\n",
5131 bp->dev->name, bp->gunzip_outlen);
5132 bp->gunzip_outlen >>= 2;
5133
5134 zlib_inflateEnd(bp->strm);
5135
5136 if (rc == Z_STREAM_END)
5137 return 0;
5138
5139 return rc;
5140}
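/* Editor's note -- a minimal usage sketch of the gunzip helpers above
 * (fw_blob and fw_len are hypothetical names; the input must be a
 * standard gzip stream starting with the 0x1f 0x8b magic). On success
 * the inflated image is left in bp->gunzip_buf and bp->gunzip_outlen
 * holds its length in 32-bit words:
 *
 *	if (bnx2x_gunzip_init(bp) == 0) {
 *		if (bnx2x_gunzip(bp, fw_blob, fw_len) == 0)
 *			rc = consume_fw(bp);	(hypothetical consumer)
 *		bnx2x_gunzip_end(bp);
 *	}
 */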
5141
5142/* nic load/unload */
5143
5144/*
34f80b04 5145 * General service functions
5146 */
5147
5148/* send a NIG loopback debug packet */
5149static void bnx2x_lb_pckt(struct bnx2x *bp)
5150{
a2fbb9ea 5151 u32 wb_write[3];
5152
5153 /* Ethernet source and destination addresses */
5154 wb_write[0] = 0x55555555;
5155 wb_write[1] = 0x55555555;
34f80b04 5156 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5157 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5158
5159 /* NON-IP protocol */
5160 wb_write[0] = 0x09000000;
5161 wb_write[1] = 0x55555555;
34f80b04 5162 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5163 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5164}
5165
5166/* some of the internal memories
5167 * are not directly readable from the driver;
5168 * to test them we send debug packets
5169 */
5170static int bnx2x_int_mem_test(struct bnx2x *bp)
5171{
5172 int factor;
5173 int count, i;
5174 u32 val = 0;
5175
ad8d3948 5176 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5177 factor = 120;
5178 else if (CHIP_REV_IS_EMUL(bp))
5179 factor = 200;
5180 else
a2fbb9ea 5181 factor = 1;
5182
5183 DP(NETIF_MSG_HW, "start part1\n");
5184
5185 /* Disable inputs of parser neighbor blocks */
5186 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5187 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5188 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5189 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5190
5191 /* Write 0 to parser credits for CFC search request */
5192 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5193
5194 /* send Ethernet packet */
5195 bnx2x_lb_pckt(bp);
5196
5197 /* TODO: do we need to reset the NIG statistics? */
5198 /* Wait until NIG register shows 1 packet of size 0x10 */
5199 count = 1000 * factor;
5200 while (count) {
34f80b04 5201
5202 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5203 val = *bnx2x_sp(bp, wb_data[0]);
5204 if (val == 0x10)
5205 break;
5206
5207 msleep(10);
5208 count--;
5209 }
5210 if (val != 0x10) {
5211 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5212 return -1;
5213 }
5214
5215 /* Wait until PRS register shows 1 packet */
5216 count = 1000 * factor;
5217 while (count) {
5218 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5219 if (val == 1)
5220 break;
5221
5222 msleep(10);
5223 count--;
5224 }
5225 if (val != 0x1) {
5226 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5227 return -2;
5228 }
5229
5230 /* Reset and init BRB, PRS */
34f80b04 5231 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5232 msleep(50);
34f80b04 5233 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5234 msleep(50);
5235 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5236 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5237
5238 DP(NETIF_MSG_HW, "part2\n");
5239
5240 /* Disable inputs of parser neighbor blocks */
5241 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5242 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5243 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5244 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5245
5246 /* Write 0 to parser credits for CFC search request */
5247 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5248
5249 /* send 10 Ethernet packets */
5250 for (i = 0; i < 10; i++)
5251 bnx2x_lb_pckt(bp);
5252
5253 /* Wait until NIG register shows 10 + 1
5254 packets of size 11*0x10 = 0xb0 */
5255 count = 1000 * factor;
5256 while (count) {
34f80b04 5257
5258 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5259 val = *bnx2x_sp(bp, wb_data[0]);
5260 if (val == 0xb0)
5261 break;
5262
5263 msleep(10);
5264 count--;
5265 }
5266 if (val != 0xb0) {
5267 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5268 return -3;
5269 }
5270
5271 /* Wait until PRS register shows 2 packets */
5272 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5273 if (val != 2)
5274 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5275
5276 /* Write 1 to parser credits for CFC search request */
5277 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5278
5279 /* Wait until PRS register shows 3 packets */
5280 msleep(10 * factor);
5281 /* Wait until NIG register shows 1 packet of size 0x10 */
5282 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5283 if (val != 3)
5284 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5285
5286 /* clear NIG EOP FIFO */
5287 for (i = 0; i < 11; i++)
5288 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5289 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5290 if (val != 1) {
5291 BNX2X_ERR("clear of NIG failed\n");
5292 return -4;
5293 }
5294
5295 /* Reset and init BRB, PRS, NIG */
5296 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5297 msleep(50);
5298 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5299 msleep(50);
5300 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5301 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5302#ifndef BCM_ISCSI
5303 /* set NIC mode */
5304 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5305#endif
5306
5307 /* Enable inputs of parser neighbor blocks */
5308 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5309 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5310 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5311 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5312
5313 DP(NETIF_MSG_HW, "done\n");
5314
5315 return 0; /* OK */
5316}
5317
5318static void enable_blocks_attention(struct bnx2x *bp)
5319{
5320 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5321 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5322 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5323 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5324 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5325 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5326 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5327 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5328 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5329/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5330/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5331 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5332 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5333 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5334/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5335/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5336 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5337 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5338 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5339 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5340/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5341/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5342 if (CHIP_REV_IS_FPGA(bp))
5343 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5344 else
5345 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5346 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5347 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5348 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5349/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5350/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5351 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5352 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5353/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5354 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5355}
5356
34f80b04 5357
5358static void bnx2x_reset_common(struct bnx2x *bp)
5359{
5360 /* reset_common */
5361 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5362 0xd3ffff7f);
5363 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5364}
5365
34f80b04 5366static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5367{
a2fbb9ea 5368 u32 val, i;
a2fbb9ea 5369
34f80b04 5370 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5371
81f75bbf 5372 bnx2x_reset_common(bp);
5373 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5374 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5375
5376 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5377 if (CHIP_IS_E1H(bp))
5378 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5379
5380 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5381 msleep(30);
5382 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5383
5384 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5385 if (CHIP_IS_E1(bp)) {
5386 /* enable HW interrupt from PXP on USDM overflow
5387 bit 16 on INT_MASK_0 */
5388 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5389 }
a2fbb9ea 5390
5391 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5392 bnx2x_init_pxp(bp);
5393
5394#ifdef __BIG_ENDIAN
5395 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5396 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5397 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5398 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5399 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5400 /* make sure this value is 0 */
5401 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5402
5403/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5404 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5405 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5406 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5407 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5408#endif
5409
34f80b04 5410 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5411#ifdef BCM_ISCSI
5412 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5413 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5414 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5415#endif
5416
5417 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5418 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5419
5420 /* let the HW do its magic ... */
5421 msleep(100);
5422 /* finish PXP init */
5423 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5424 if (val != 1) {
5425 BNX2X_ERR("PXP2 CFG failed\n");
5426 return -EBUSY;
5427 }
5428 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5429 if (val != 1) {
5430 BNX2X_ERR("PXP2 RD_INIT failed\n");
5431 return -EBUSY;
5432 }
a2fbb9ea 5433
5434 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5435 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5436
34f80b04 5437 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5438
5439 /* clean the DMAE memory */
5440 bp->dmae_ready = 1;
5441 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5442
5443 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5444 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5445 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5446 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5447
5448 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5449 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5450 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5451 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5452
5453 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5454 /* soft reset pulse */
5455 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5456 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5457
5458#ifdef BCM_ISCSI
34f80b04 5459 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5460#endif
a2fbb9ea 5461
5462 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5463 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5464 if (!CHIP_REV_IS_SLOW(bp)) {
5465 /* enable hw interrupt from doorbell Q */
5466 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5467 }
a2fbb9ea 5468
5469 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5470 if (CHIP_REV_IS_SLOW(bp)) {
5471 /* fix for emulation and FPGA for no pause */
5472 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5473 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5474 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5475 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5476 }
a2fbb9ea 5477
34f80b04 5478 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5479 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5480 /* set NIC mode */
5481 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5482 if (CHIP_IS_E1H(bp))
5483 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5484
5485 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5486 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5487 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5488 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5489
5490 if (CHIP_IS_E1H(bp)) {
5491 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5492 STORM_INTMEM_SIZE_E1H/2);
5493 bnx2x_init_fill(bp,
5494 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5495 0, STORM_INTMEM_SIZE_E1H/2);
5496 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5497 STORM_INTMEM_SIZE_E1H/2);
5498 bnx2x_init_fill(bp,
5499 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5500 0, STORM_INTMEM_SIZE_E1H/2);
5501 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5502 STORM_INTMEM_SIZE_E1H/2);
5503 bnx2x_init_fill(bp,
5504 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5505 0, STORM_INTMEM_SIZE_E1H/2);
5506 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5507 STORM_INTMEM_SIZE_E1H/2);
5508 bnx2x_init_fill(bp,
5509 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5510 0, STORM_INTMEM_SIZE_E1H/2);
5511 } else { /* E1 */
5512 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5513 STORM_INTMEM_SIZE_E1);
5514 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5515 STORM_INTMEM_SIZE_E1);
5516 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5517 STORM_INTMEM_SIZE_E1);
5518 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5519 STORM_INTMEM_SIZE_E1);
34f80b04 5520 }
a2fbb9ea 5521
5522 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5523 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5524 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5525 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5526
5527 /* sync semi rtc */
5528 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5529 0x80000000);
5530 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5531 0x80000000);
a2fbb9ea 5532
5533 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5534 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5535 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5536
5537 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5538 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5539 REG_WR(bp, i, 0xc0cac01a);
5540 /* TODO: replace with something meaningful */
5541 }
8d9c5f34 5542 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5543 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5544
5545 if (sizeof(union cdu_context) != 1024)
5546 /* we currently assume that a context is 1024 bytes */
5547 printk(KERN_ALERT PFX "please adjust the size of"
5548 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5549
5550 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5551 val = (4 << 24) + (0 << 12) + 1024;
5552 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5553 if (CHIP_IS_E1(bp)) {
5554 /* !!! fix pxp client credit until excel update */
5555 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5556 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5557 }
a2fbb9ea 5558
5559 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5560 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5561 /* enable context validation interrupt from CFC */
5562 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5563
5564 /* set the thresholds to prevent CFC/CDU race */
5565 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5566
5567 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5568 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5569
5570 /* PXPCS COMMON comes here */
5571 /* Reset PCIE errors for debug */
5572 REG_WR(bp, 0x2814, 0xffffffff);
5573 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5574
5575 /* EMAC0 COMMON comes here */
5576 /* EMAC1 COMMON comes here */
5577 /* DBU COMMON comes here */
5578 /* DBG COMMON comes here */
5579
5580 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5581 if (CHIP_IS_E1H(bp)) {
5582 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5583 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5584 }
5585
5586 if (CHIP_REV_IS_SLOW(bp))
5587 msleep(200);
5588
5589 /* finish CFC init */
5590 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5591 if (val != 1) {
5592 BNX2X_ERR("CFC LL_INIT failed\n");
5593 return -EBUSY;
5594 }
5595 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5596 if (val != 1) {
5597 BNX2X_ERR("CFC AC_INIT failed\n");
5598 return -EBUSY;
5599 }
5600 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5601 if (val != 1) {
5602 BNX2X_ERR("CFC CAM_INIT failed\n");
5603 return -EBUSY;
5604 }
5605 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5606
5607 /* read NIG statistic
5608 to see if this is our first load since power-up */
5609 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5610 val = *bnx2x_sp(bp, wb_data[0]);
5611
5612 /* do internal memory self test */
5613 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5614 BNX2X_ERR("internal mem self test failed\n");
5615 return -EBUSY;
5616 }
5617
5618 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5619 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5620 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5621 /* Fan failure is indicated by SPIO 5 */
5622 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5623 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5624
5625 /* set to active low mode */
5626 val = REG_RD(bp, MISC_REG_SPIO_INT);
5627 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5628 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5629 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5630
5631 /* enable interrupt to signal the IGU */
5632 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5633 val |= (1 << MISC_REGISTERS_SPIO_5);
5634 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5635 break;
f1410647 5636
5637 default:
5638 break;
5639 }
f1410647 5640
5641 /* clear PXP2 attentions */
5642 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5643
34f80b04 5644 enable_blocks_attention(bp);
a2fbb9ea 5645
5646 if (!BP_NOMCP(bp)) {
5647 bnx2x_acquire_phy_lock(bp);
5648 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5649 bnx2x_release_phy_lock(bp);
5650 } else
5651 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5652
5653 return 0;
5654}
a2fbb9ea 5655
5656static int bnx2x_init_port(struct bnx2x *bp)
5657{
5658 int port = BP_PORT(bp);
5659 u32 val;
a2fbb9ea 5660
5661 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5662
5663 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5664
5665 /* Port PXP comes here */
5666 /* Port PXP2 comes here */
5667#ifdef BCM_ISCSI
5668 /* Port0 1
5669 * Port1 385 */
5670 i++;
5671 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5672 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5673 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5674 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5675
5676 /* Port0 2
5677 * Port1 386 */
5678 i++;
5679 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5680 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5681 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5682 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5683
5684 /* Port0 3
5685 * Port1 387 */
5686 i++;
5687 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5688 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5689 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5690 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5691#endif
34f80b04 5692 /* Port CMs come here */
5693 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5694 (port ? XCM_PORT1_END : XCM_PORT0_END));
5695
5696 /* Port QM comes here */
5697#ifdef BCM_ISCSI
5698 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5699 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5700
5701 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5702 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5703#endif
5704 /* Port DQ comes here */
5705 /* Port BRB1 comes here */
ad8d3948 5706 /* Port PRS comes here */
5707 /* Port TSDM comes here */
5708 /* Port CSDM comes here */
5709 /* Port USDM comes here */
5710 /* Port XSDM comes here */
5711 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5712 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5713 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5714 port ? USEM_PORT1_END : USEM_PORT0_END);
5715 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5716 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5717 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5718 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5719 /* Port UPB comes here */
5720 /* Port XPB comes here */
5721
5722 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5723 port ? PBF_PORT1_END : PBF_PORT0_END);
5724
5725 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5726 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5727
5728 /* update threshold */
34f80b04 5729 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5730 /* update init credit */
34f80b04 5731 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5732
5733 /* probe changes */
34f80b04 5734 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5735 msleep(5);
34f80b04 5736 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5737
5738#ifdef BCM_ISCSI
5739 /* tell the searcher where the T2 table is */
5740 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5741
5742 wb_write[0] = U64_LO(bp->t2_mapping);
5743 wb_write[1] = U64_HI(bp->t2_mapping);
5744 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5745 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5746 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5747 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5748
5749 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5750 /* Port SRCH comes here */
5751#endif
5752 /* Port CDU comes here */
5753 /* Port CFC comes here */
5754
5755 if (CHIP_IS_E1(bp)) {
5756 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5757 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5758 }
5759 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5760 port ? HC_PORT1_END : HC_PORT0_END);
5761
5762 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5763 MISC_AEU_PORT0_START,
5764 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5765 /* init aeu_mask_attn_func_0/1:
5766 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5767 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5768 * bits 4-7 are used for "per vn group attention" */
5769 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5770 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5771
5772 /* Port PXPCS comes here */
5773 /* Port EMAC0 comes here */
5774 /* Port EMAC1 comes here */
5775 /* Port DBU comes here */
5776 /* Port DBG comes here */
5777 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5778 port ? NIG_PORT1_END : NIG_PORT0_END);
5779
5780 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5781
5782 if (CHIP_IS_E1H(bp)) {
5783 u32 wsum;
5784 struct cmng_struct_per_port m_cmng_port;
5785 int vn;
5786
5787 /* 0x2 disable e1hov, 0x1 enable */
5788 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5789 (IS_E1HMF(bp) ? 0x1 : 0x2));
5790
5791 /* Init RATE SHAPING and FAIRNESS contexts.
5792 Initialize as if there is 10G link. */
5793 wsum = bnx2x_calc_vn_wsum(bp);
5794 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5795 if (IS_E1HMF(bp))
5796 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5797 bnx2x_init_vn_minmax(bp, 2*vn + port,
5798 wsum, 10000, &m_cmng_port);
5799 }
5800
5801 /* Port MCP comes here */
5802 /* Port DMAE comes here */
5803
34f80b04 5804 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5805 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5806 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5807 /* add SPIO 5 to group 0 */
5808 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5809 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5810 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5811 break;
5812
5813 default:
5814 break;
5815 }
5816
c18487ee 5817 bnx2x__link_reset(bp);
a2fbb9ea 5818
5819 return 0;
5820}
5821
5822#define ILT_PER_FUNC (768/2)
5823#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5824/* the phys address is shifted right 12 bits and has a '1'
5825 (valid) bit added as the 53rd bit;
5826 then, since this is a wide register(TM),
5827 we split it into two 32-bit writes
5828 */
5829#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5830#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5831#define PXP_ONE_ILT(x) (((x) << 10) | x)
5832#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5833
5834#define CNIC_ILT_LINES 0
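/* Editor's note -- a worked example of the ONCHIP_ADDR split above,
 * using the hypothetical DMA address 0x0012345678901000: shifted
 * right 12 bits it becomes 0x12345678901, so ONCHIP_ADDR1() keeps
 * the low 32 bits (0x45678901) while ONCHIP_ADDR2() ORs the valid
 * bit with the upper remainder: (1 << 20) | 0x123 = 0x100123.
 */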
5835
5836static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5837{
5838 int reg;
5839
5840 if (CHIP_IS_E1H(bp))
5841 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5842 else /* E1 */
5843 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5844
5845 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5846}
5847
5848static int bnx2x_init_func(struct bnx2x *bp)
5849{
5850 int port = BP_PORT(bp);
5851 int func = BP_FUNC(bp);
8badd27a 5852 u32 addr, val;
5853 int i;
5854
5855 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5856
5857 /* set MSI reconfigure capability */
5858 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5859 val = REG_RD(bp, addr);
5860 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5861 REG_WR(bp, addr, val);
5862
5863 i = FUNC_ILT_BASE(func);
5864
5865 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5866 if (CHIP_IS_E1H(bp)) {
5867 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5868 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5869 } else /* E1 */
5870 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5871 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5872
5873
5874 if (CHIP_IS_E1H(bp)) {
5875 for (i = 0; i < 9; i++)
5876 bnx2x_init_block(bp,
5877 cm_start[func][i], cm_end[func][i]);
5878
5879 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5880 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5881 }
5882
5883 /* HC init per function */
5884 if (CHIP_IS_E1H(bp)) {
5885 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5886
5887 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5888 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5889 }
5890 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5891
c14423fe 5892 /* Reset PCIE errors for debug */
5893 REG_WR(bp, 0x2114, 0xffffffff);
5894 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5895
5896 return 0;
5897}
5898
5899static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5900{
5901 int i, rc = 0;
a2fbb9ea 5902
5903 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5904 BP_FUNC(bp), load_code);
a2fbb9ea 5905
5906 bp->dmae_ready = 0;
5907 mutex_init(&bp->dmae_mutex);
5908 bnx2x_gunzip_init(bp);
a2fbb9ea 5909
5910 switch (load_code) {
5911 case FW_MSG_CODE_DRV_LOAD_COMMON:
5912 rc = bnx2x_init_common(bp);
5913 if (rc)
5914 goto init_hw_err;
5915 /* no break */
5916
5917 case FW_MSG_CODE_DRV_LOAD_PORT:
5918 bp->dmae_ready = 1;
5919 rc = bnx2x_init_port(bp);
5920 if (rc)
5921 goto init_hw_err;
5922 /* no break */
5923
5924 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5925 bp->dmae_ready = 1;
5926 rc = bnx2x_init_func(bp);
5927 if (rc)
5928 goto init_hw_err;
5929 break;
5930
5931 default:
5932 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5933 break;
5934 }
5935
5936 if (!BP_NOMCP(bp)) {
5937 int func = BP_FUNC(bp);
5938
5939 bp->fw_drv_pulse_wr_seq =
34f80b04 5940 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5941 DRV_PULSE_SEQ_MASK);
5942 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5943 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5944 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5945 } else
5946 bp->func_stx = 0;
a2fbb9ea 5947
5948 /* this needs to be done before gunzip end */
5949 bnx2x_zero_def_sb(bp);
5950 for_each_queue(bp, i)
5951 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5952
5953init_hw_err:
5954 bnx2x_gunzip_end(bp);
5955
5956 return rc;
5957}
5958
c14423fe 5959/* send the MCP a request, block until there is a reply */
5960static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5961{
34f80b04 5962 int func = BP_FUNC(bp);
5963 u32 seq = ++bp->fw_seq;
5964 u32 rc = 0;
5965 u32 cnt = 1;
5966 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5967
34f80b04 5968 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5969 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5970
5971 do {
5972 /* let the FW do its magic ... */
5973 msleep(delay);
a2fbb9ea 5974
19680c48 5975 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5976
5977 /* Give the FW up to 2 seconds (200 * delay ms) */
5978 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5979
5980 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5981 cnt*delay, rc, seq);
5982
5983 /* is this a reply to our command? */
5984 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5985 rc &= FW_MSG_CODE_MASK;
f1410647 5986
5987 } else {
5988 /* FW BUG! */
5989 BNX2X_ERR("FW failed to respond!\n");
5990 bnx2x_fw_dump(bp);
5991 rc = 0;
5992 }
f1410647 5993
5994 return rc;
5995}
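/* Editor's note: request and reply are paired by sequence number --
 * the driver writes (command | seq) into drv_mb_header and polls
 * fw_mb_header until its FW_MSG_SEQ_NUMBER_MASK bits echo that seq;
 * the FW_MSG_CODE_MASK bits then carry the reply code. Sketch:
 *
 *	u32 reply = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *	if (reply == FW_MSG_CODE_DRV_LOAD_COMMON)
 *		(first function up: init the common blocks)
 */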
5996
5997static void bnx2x_free_mem(struct bnx2x *bp)
5998{
5999
6000#define BNX2X_PCI_FREE(x, y, size) \
6001 do { \
6002 if (x) { \
6003 pci_free_consistent(bp->pdev, size, x, y); \
6004 x = NULL; \
6005 y = 0; \
6006 } \
6007 } while (0)
6008
6009#define BNX2X_FREE(x) \
6010 do { \
6011 if (x) { \
6012 vfree(x); \
6013 x = NULL; \
6014 } \
6015 } while (0)
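/* Editor's note: the do { ... } while (0) wrapper is the standard
 * idiom for making a multi-statement macro expand as one statement,
 * so "if (cond) BNX2X_FREE(p);" stays correct even without braces.
 */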
6016
6017 int i;
6018
6019 /* fastpath */
555f6c78 6020 /* Common */
6021 for_each_queue(bp, i) {
6022
555f6c78 6023 /* status blocks */
6024 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6025 bnx2x_fp(bp, i, status_blk_mapping),
6026 sizeof(struct host_status_block) +
6027 sizeof(struct eth_tx_db_data));
6028 }
6029 /* Rx */
6030 for_each_rx_queue(bp, i) {
a2fbb9ea 6031
555f6c78 6032 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6033 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6034 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6035 bnx2x_fp(bp, i, rx_desc_mapping),
6036 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6037
6038 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6039 bnx2x_fp(bp, i, rx_comp_mapping),
6040 sizeof(struct eth_fast_path_rx_cqe) *
6041 NUM_RCQ_BD);
a2fbb9ea 6042
7a9b2557 6043 /* SGE ring */
32626230 6044 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6045 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6046 bnx2x_fp(bp, i, rx_sge_mapping),
6047 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6048 }
6049 /* Tx */
6050 for_each_tx_queue(bp, i) {
6051
6052 /* fastpath tx rings: tx_buf tx_desc */
6053 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6054 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6055 bnx2x_fp(bp, i, tx_desc_mapping),
6056 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6057 }
6058 /* end of fastpath */
6059
6060 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6061 sizeof(struct host_def_status_block));
6062
6063 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6064 sizeof(struct bnx2x_slowpath));
6065
6066#ifdef BCM_ISCSI
6067 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6068 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6069 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6070 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6071#endif
7a9b2557 6072 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6073
6074#undef BNX2X_PCI_FREE
6075#undef BNX2X_FREE
6076}
6077
6078static int bnx2x_alloc_mem(struct bnx2x *bp)
6079{
6080
6081#define BNX2X_PCI_ALLOC(x, y, size) \
6082 do { \
6083 x = pci_alloc_consistent(bp->pdev, size, y); \
6084 if (x == NULL) \
6085 goto alloc_mem_err; \
6086 memset(x, 0, size); \
6087 } while (0)
6088
6089#define BNX2X_ALLOC(x, size) \
6090 do { \
6091 x = vmalloc(size); \
6092 if (x == NULL) \
6093 goto alloc_mem_err; \
6094 memset(x, 0, size); \
6095 } while (0)
6096
6097 int i;
6098
6099 /* fastpath */
555f6c78 6100 /* Common */
6101 for_each_queue(bp, i) {
6102 bnx2x_fp(bp, i, bp) = bp;
6103
555f6c78 6104 /* status blocks */
6105 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6106 &bnx2x_fp(bp, i, status_blk_mapping),
6107 sizeof(struct host_status_block) +
6108 sizeof(struct eth_tx_db_data));
6109 }
6110 /* Rx */
6111 for_each_rx_queue(bp, i) {
a2fbb9ea 6112
555f6c78 6113 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6114 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6115 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6116 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6117 &bnx2x_fp(bp, i, rx_desc_mapping),
6118 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6119
6120 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6121 &bnx2x_fp(bp, i, rx_comp_mapping),
6122 sizeof(struct eth_fast_path_rx_cqe) *
6123 NUM_RCQ_BD);
6124
6125 /* SGE ring */
6126 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6127 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6128 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6129 &bnx2x_fp(bp, i, rx_sge_mapping),
6130 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6131 }
6132 /* Tx */
6133 for_each_tx_queue(bp, i) {
6134
6135 bnx2x_fp(bp, i, hw_tx_prods) =
6136 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6137
6138 bnx2x_fp(bp, i, tx_prods_mapping) =
6139 bnx2x_fp(bp, i, status_blk_mapping) +
6140 sizeof(struct host_status_block);
6141
6142 /* fastpath tx rings: tx_buf tx_desc */
6143 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6144 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6145 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6146 &bnx2x_fp(bp, i, tx_desc_mapping),
6147 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6148 }
6149 /* end of fastpath */
6150
6151 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6152 sizeof(struct host_def_status_block));
6153
6154 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6155 sizeof(struct bnx2x_slowpath));
6156
6157#ifdef BCM_ISCSI
6158 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6159
6160 /* Initialize T1 */
6161 for (i = 0; i < 64*1024; i += 64) {
6162 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6163 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6164 }
6165
6166 /* allocate searcher T2 table
6167 we allocate 1/4 of alloc num for T2
6168 (which is not entered into the ILT) */
6169 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6170
6171 /* Initialize T2 */
6172 for (i = 0; i < 16*1024; i += 64)
6173 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6174
c14423fe 6175 /* now fixup the last line in the block to point to the next block */
6176 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6177
6178 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6179 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6180
6181 /* QM queues (128*MAX_CONN) */
6182 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6183#endif
6184
6185 /* Slow path ring */
6186 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6187
6188 return 0;
6189
6190alloc_mem_err:
6191 bnx2x_free_mem(bp);
6192 return -ENOMEM;
6193
6194#undef BNX2X_PCI_ALLOC
6195#undef BNX2X_ALLOC
6196}
6197
6198static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6199{
6200 int i;
6201
555f6c78 6202 for_each_tx_queue(bp, i) {
6203 struct bnx2x_fastpath *fp = &bp->fp[i];
6204
6205 u16 bd_cons = fp->tx_bd_cons;
6206 u16 sw_prod = fp->tx_pkt_prod;
6207 u16 sw_cons = fp->tx_pkt_cons;
6208
6209 while (sw_cons != sw_prod) {
6210 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6211 sw_cons++;
6212 }
6213 }
6214}
6215
6216static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6217{
6218 int i, j;
6219
555f6c78 6220 for_each_rx_queue(bp, j) {
6221 struct bnx2x_fastpath *fp = &bp->fp[j];
6222
6223 for (i = 0; i < NUM_RX_BD; i++) {
6224 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6225 struct sk_buff *skb = rx_buf->skb;
6226
6227 if (skb == NULL)
6228 continue;
6229
6230 pci_unmap_single(bp->pdev,
6231 pci_unmap_addr(rx_buf, mapping),
437cf2f1 6232 bp->rx_buf_size,
6233 PCI_DMA_FROMDEVICE);
6234
6235 rx_buf->skb = NULL;
6236 dev_kfree_skb(skb);
6237 }
7a9b2557 6238 if (!fp->disable_tpa)
6239 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6240 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6241 ETH_MAX_AGGREGATION_QUEUES_E1H);
6242 }
6243}
6244
6245static void bnx2x_free_skbs(struct bnx2x *bp)
6246{
6247 bnx2x_free_tx_skbs(bp);
6248 bnx2x_free_rx_skbs(bp);
6249}
6250
6251static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6252{
34f80b04 6253 int i, offset = 1;
6254
6255 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6256 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6257 bp->msix_table[0].vector);
6258
6259 for_each_queue(bp, i) {
c14423fe 6260 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6261 "state %x\n", i, bp->msix_table[i + offset].vector,
6262 bnx2x_fp(bp, i, state));
6263
34f80b04 6264 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6265 }
6266}
6267
6268static void bnx2x_free_irq(struct bnx2x *bp)
6269{
a2fbb9ea 6270 if (bp->flags & USING_MSIX_FLAG) {
6271 bnx2x_free_msix_irqs(bp);
6272 pci_disable_msix(bp->pdev);
6273 bp->flags &= ~USING_MSIX_FLAG;
6274
6275 } else if (bp->flags & USING_MSI_FLAG) {
6276 free_irq(bp->pdev->irq, bp->dev);
6277 pci_disable_msi(bp->pdev);
6278 bp->flags &= ~USING_MSI_FLAG;
6279
6280 } else
6281 free_irq(bp->pdev->irq, bp->dev);
6282}
6283
6284static int bnx2x_enable_msix(struct bnx2x *bp)
6285{
6286 int i, rc, offset = 1;
6287 int igu_vec = 0;
a2fbb9ea 6288
6289 bp->msix_table[0].entry = igu_vec;
6290 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6291
34f80b04 6292 for_each_queue(bp, i) {
8badd27a 6293 igu_vec = BP_L_ID(bp) + offset + i;
6294 bp->msix_table[i + offset].entry = igu_vec;
6295 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6296 "(fastpath #%u)\n", i + offset, igu_vec, i);
6297 }
6298
34f80b04 6299 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6300 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6301 if (rc) {
6302 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6303 return rc;
34f80b04 6304 }
8badd27a 6305
6306 bp->flags |= USING_MSIX_FLAG;
6307
6308 return 0;
6309}
6310
6311static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6312{
34f80b04 6313 int i, rc, offset = 1;
a2fbb9ea 6314
6315 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6316 bp->dev->name, bp->dev);
6317 if (rc) {
6318 BNX2X_ERR("request sp irq failed\n");
6319 return -EBUSY;
6320 }
6321
6322 for_each_queue(bp, i) {
6323 struct bnx2x_fastpath *fp = &bp->fp[i];
6324
6325 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6326 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6327 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6328 if (rc) {
555f6c78 6329 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6330 bnx2x_free_msix_irqs(bp);
6331 return -EBUSY;
6332 }
6333
555f6c78 6334 fp->state = BNX2X_FP_STATE_IRQ;
6335 }
6336
6337 i = BNX2X_NUM_QUEUES(bp);
6338 if (is_multi(bp))
6339 printk(KERN_INFO PFX
6340 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6341 bp->dev->name, bp->msix_table[0].vector,
6342 bp->msix_table[offset].vector,
6343 bp->msix_table[offset + i - 1].vector);
6344 else
6345 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6346 bp->dev->name, bp->msix_table[0].vector,
6347 bp->msix_table[offset + i - 1].vector);
6348
a2fbb9ea 6349 return 0;
6350}
6351
6352static int bnx2x_enable_msi(struct bnx2x *bp)
6353{
6354 int rc;
6355
6356 rc = pci_enable_msi(bp->pdev);
6357 if (rc) {
6358 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6359 return -1;
6360 }
6361 bp->flags |= USING_MSI_FLAG;
6362
6363 return 0;
6364}
6365
6366static int bnx2x_req_irq(struct bnx2x *bp)
6367{
8badd27a 6368 unsigned long flags;
34f80b04 6369 int rc;
a2fbb9ea 6370
6371 if (bp->flags & USING_MSI_FLAG)
6372 flags = 0;
6373 else
6374 flags = IRQF_SHARED;
6375
6376 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6377 bp->dev->name, bp->dev);
6378 if (!rc)
6379 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6380
6381 return rc;
6382}
6383
6384static void bnx2x_napi_enable(struct bnx2x *bp)
6385{
6386 int i;
6387
555f6c78 6388 for_each_rx_queue(bp, i)
6389 napi_enable(&bnx2x_fp(bp, i, napi));
6390}
6391
6392static void bnx2x_napi_disable(struct bnx2x *bp)
6393{
6394 int i;
6395
555f6c78 6396 for_each_rx_queue(bp, i)
6397 napi_disable(&bnx2x_fp(bp, i, napi));
6398}
6399
6400static void bnx2x_netif_start(struct bnx2x *bp)
6401{
6402 if (atomic_dec_and_test(&bp->intr_sem)) {
6403 if (netif_running(bp->dev)) {
6404 bnx2x_napi_enable(bp);
6405 bnx2x_int_enable(bp);
6406 if (bp->state == BNX2X_STATE_OPEN)
6407 netif_tx_wake_all_queues(bp->dev);
6408 }
6409 }
6410}
6411
f8ef6e44 6412static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6413{
f8ef6e44 6414 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6415 bnx2x_napi_disable(bp);
65abd74d 6416 if (netif_running(bp->dev)) {
6417 netif_tx_disable(bp->dev);
6418 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6419 }
6420}
6421
6422/*
6423 * Init service functions
6424 */
6425
3101c2bc 6426static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6427{
6428 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6429 int port = BP_PORT(bp);
6430
6431 /* CAM allocation
6432 * unicasts 0-31:port0 32-63:port1
6433 * multicast 64-127:port0 128-191:port1
6434 */
8d9c5f34 6435 config->hdr.length = 2;
af246401 6436 config->hdr.offset = port ? 32 : 0;
34f80b04 6437 config->hdr.client_id = BP_CL_ID(bp);
6438 config->hdr.reserved1 = 0;
6439
6440 /* primary MAC */
6441 config->config_table[0].cam_entry.msb_mac_addr =
6442 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6443 config->config_table[0].cam_entry.middle_mac_addr =
6444 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6445 config->config_table[0].cam_entry.lsb_mac_addr =
6446 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6447 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6448 if (set)
6449 config->config_table[0].target_table_entry.flags = 0;
6450 else
6451 CAM_INVALIDATE(config->config_table[0]);
6452 config->config_table[0].target_table_entry.client_id = 0;
6453 config->config_table[0].target_table_entry.vlan_id = 0;
6454
6455 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6456 (set ? "setting" : "clearing"),
6457 config->config_table[0].cam_entry.msb_mac_addr,
6458 config->config_table[0].cam_entry.middle_mac_addr,
6459 config->config_table[0].cam_entry.lsb_mac_addr);
6460
6461 /* broadcast */
6462 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6463 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6464 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6465 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6466 if (set)
6467 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6468 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6469 else
6470 CAM_INVALIDATE(config->config_table[1]);
6471 config->config_table[1].target_table_entry.client_id = 0;
6472 config->config_table[1].target_table_entry.vlan_id = 0;
6473
6474 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6475 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6476 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6477}
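/* Editor's note -- how the swab16() packing above lays out a MAC
 * address, using the illustrative address 00:11:22:33:44:55 on a
 * little-endian host: *(u16 *)&dev_addr[0] reads as 0x1100, which
 * swab16() turns into 0x0011, so msb/middle/lsb become 0x0011,
 * 0x2233 and 0x4455 -- the bytes in wire order.
 */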
6478
3101c2bc 6479static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6480{
6481 struct mac_configuration_cmd_e1h *config =
6482 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6483
3101c2bc 6484 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6485 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6486 return;
6487 }
6488
6489 /* CAM allocation for E1H
6490 * unicasts: by func number
6491 * multicast: 20+FUNC*20, 20 each
6492 */
8d9c5f34 6493 config->hdr.length = 1;
6494 config->hdr.offset = BP_FUNC(bp);
6495 config->hdr.client_id = BP_CL_ID(bp);
6496 config->hdr.reserved1 = 0;
6497
6498 /* primary MAC */
6499 config->config_table[0].msb_mac_addr =
6500 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6501 config->config_table[0].middle_mac_addr =
6502 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6503 config->config_table[0].lsb_mac_addr =
6504 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6505 config->config_table[0].client_id = BP_L_ID(bp);
6506 config->config_table[0].vlan_id = 0;
6507 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6508 if (set)
6509 config->config_table[0].flags = BP_PORT(bp);
6510 else
6511 config->config_table[0].flags =
6512 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6513
6514 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6515 (set ? "setting" : "clearing"),
6516 config->config_table[0].msb_mac_addr,
6517 config->config_table[0].middle_mac_addr,
6518 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6519
6520 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6521 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6522 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6523}
6524
6525static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6526 int *state_p, int poll)
6527{
6528 /* can take a while if any port is running */
34f80b04 6529 int cnt = 500;
a2fbb9ea 6530
6531 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6532 poll ? "polling" : "waiting", state, idx);
6533
6534 might_sleep();
34f80b04 6535 while (cnt--) {
6536 if (poll) {
6537 bnx2x_rx_int(bp->fp, 10);
6538 /* if index is different from 0
6539 * the reply for some commands will
3101c2bc 6540 * be on the non default queue
6541 */
6542 if (idx)
6543 bnx2x_rx_int(&bp->fp[idx], 10);
6544 }
a2fbb9ea 6545
3101c2bc 6546 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6547 if (*state_p == state)
6548 return 0;
6549
a2fbb9ea 6550 msleep(1);
6551 }
6552
a2fbb9ea 6553 /* timeout! */
6554 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6555 poll ? "polling" : "waiting", state, idx);
6556#ifdef BNX2X_STOP_ON_ERROR
6557 bnx2x_panic();
6558#endif
a2fbb9ea 6559
49d66772 6560 return -EBUSY;
6561}
6562
6563static int bnx2x_setup_leading(struct bnx2x *bp)
6564{
34f80b04 6565 int rc;
a2fbb9ea 6566
c14423fe 6567 /* reset IGU state */
34f80b04 6568 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6569
6570 /* SETUP ramrod */
6571 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6572
6573 /* Wait for completion */
6574 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6575
34f80b04 6576 return rc;
6577}
6578
6579static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6580{
6581 struct bnx2x_fastpath *fp = &bp->fp[index];
6582
a2fbb9ea 6583 /* reset IGU state */
555f6c78 6584 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6585
228241eb 6586 /* SETUP ramrod */
6587 fp->state = BNX2X_FP_STATE_OPENING;
6588 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6589 fp->cl_id, 0);
6590
6591 /* Wait for completion */
6592 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 6593 &(fp->state), 0);
6594}
6595
a2fbb9ea 6596static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 6597
8badd27a 6598static void bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 6599{
555f6c78 6600 int num_queues;
a2fbb9ea 6601
6602 switch (int_mode) {
6603 case INT_MODE_INTx:
6604 case INT_MODE_MSI:
6605 num_queues = 1;
6606 bp->num_rx_queues = num_queues;
6607 bp->num_tx_queues = num_queues;
6608 DP(NETIF_MSG_IFUP,
6609 "set number of queues to %d\n", num_queues);
6610 break;
6611
6612 case INT_MODE_MSIX:
6613 default:
6614 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6615 num_queues = min_t(u32, num_online_cpus(),
6616 BNX2X_MAX_QUEUES(bp));
34f80b04 6617 else
6618 num_queues = 1;
6619 bp->num_rx_queues = num_queues;
6620 bp->num_tx_queues = num_queues;
6621 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6622 " number of tx queues to %d\n",
6623 bp->num_rx_queues, bp->num_tx_queues);
6624 /* if we can't use MSI-X we only need one fp,
6625 * so try to enable MSI-X with the requested number of fp's
6626 * and fallback to MSI or legacy INTx with one fp
6627 */
8badd27a 6628 if (bnx2x_enable_msix(bp)) {
34f80b04 6629 /* failed to enable MSI-X */
6630 num_queues = 1;
6631 bp->num_rx_queues = num_queues;
6632 bp->num_tx_queues = num_queues;
6633 if (bp->multi_mode)
6634 BNX2X_ERR("Multi requested but failed to "
6635 "enable MSI-X set number of "
6636 "queues to %d\n", num_queues);
a2fbb9ea 6637 }
8badd27a 6638 break;
a2fbb9ea 6639 }
555f6c78 6640 bp->dev->real_num_tx_queues = bp->num_tx_queues;
6641}
6642
6643static void bnx2x_set_rx_mode(struct net_device *dev);
6644
6645/* must be called with rtnl_lock */
6646static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6647{
6648 u32 load_code;
6649 int i, rc = 0;
6650#ifdef BNX2X_STOP_ON_ERROR
6651 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6652 if (unlikely(bp->panic))
6653 return -EPERM;
6654#endif
6655
6656 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6657
6658 bnx2x_set_int_mode(bp);
c14423fe 6659
6660 if (bnx2x_alloc_mem(bp))
6661 return -ENOMEM;
6662
555f6c78 6663 for_each_rx_queue(bp, i)
6664 bnx2x_fp(bp, i, disable_tpa) =
6665 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6666
555f6c78 6667 for_each_rx_queue(bp, i)
6668 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6669 bnx2x_poll, 128);
6670
6671#ifdef BNX2X_STOP_ON_ERROR
555f6c78 6672 for_each_rx_queue(bp, i) {
6673 struct bnx2x_fastpath *fp = &bp->fp[i];
6674
6675 fp->poll_no_work = 0;
6676 fp->poll_calls = 0;
6677 fp->poll_max_calls = 0;
6678 fp->poll_complete = 0;
6679 fp->poll_exit = 0;
6680 }
6681#endif
6682 bnx2x_napi_enable(bp);
6683
34f80b04
EG
6684 if (bp->flags & USING_MSIX_FLAG) {
6685 rc = bnx2x_req_msix_irqs(bp);
6686 if (rc) {
6687 pci_disable_msix(bp->pdev);
2dfe0e1f 6688 goto load_error1;
34f80b04
EG
6689 }
6690 } else {
8badd27a
EG
6691 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6692 bnx2x_enable_msi(bp);
34f80b04
EG
6693 bnx2x_ack_int(bp);
6694 rc = bnx2x_req_irq(bp);
6695 if (rc) {
2dfe0e1f 6696 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a
EG
6697 if (bp->flags & USING_MSI_FLAG)
6698 pci_disable_msi(bp->pdev);
2dfe0e1f 6699 goto load_error1;
a2fbb9ea 6700 }
8badd27a
EG
6701 if (bp->flags & USING_MSI_FLAG) {
6702 bp->dev->irq = bp->pdev->irq;
6703 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6704 bp->dev->name, bp->pdev->irq);
6705 }
a2fbb9ea
ET
6706 }
6707
2dfe0e1f
EG
 6708 /* Send LOAD_REQUEST command to the MCP.
 6709 The response tells us which type of LOAD to perform:
 6710 if this is the first port to be initialized,
 6711 the common blocks must be initialized as well; otherwise they are not.
 6712 */
6713 if (!BP_NOMCP(bp)) {
6714 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6715 if (!load_code) {
6716 BNX2X_ERR("MCP response failure, aborting\n");
6717 rc = -EBUSY;
6718 goto load_error2;
6719 }
6720 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6721 rc = -EBUSY; /* other port in diagnostic mode */
6722 goto load_error2;
6723 }
6724
6725 } else {
6726 int port = BP_PORT(bp);
6727
6728 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6729 load_count[0], load_count[1], load_count[2]);
6730 load_count[0]++;
6731 load_count[1 + port]++;
6732 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6733 load_count[0], load_count[1], load_count[2]);
6734 if (load_count[0] == 1)
6735 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6736 else if (load_count[1 + port] == 1)
6737 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6738 else
6739 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6740 }
6741
6742 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6743 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6744 bp->port.pmf = 1;
6745 else
6746 bp->port.pmf = 0;
6747 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 6748
a2fbb9ea 6749 /* Initialize HW */
34f80b04
EG
6750 rc = bnx2x_init_hw(bp, load_code);
6751 if (rc) {
a2fbb9ea 6752 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 6753 goto load_error2;
a2fbb9ea
ET
6754 }
6755
a2fbb9ea 6756 /* Setup NIC internals and enable interrupts */
471de716 6757 bnx2x_nic_init(bp, load_code);
a2fbb9ea
ET
6758
6759 /* Send LOAD_DONE command to MCP */
34f80b04 6760 if (!BP_NOMCP(bp)) {
228241eb
ET
6761 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6762 if (!load_code) {
da5a662a 6763 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6764 rc = -EBUSY;
2dfe0e1f 6765 goto load_error3;
a2fbb9ea
ET
6766 }
6767 }
6768
6769 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6770
34f80b04
EG
6771 rc = bnx2x_setup_leading(bp);
6772 if (rc) {
da5a662a 6773 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 6774 goto load_error3;
34f80b04 6775 }
a2fbb9ea 6776
34f80b04
EG
6777 if (CHIP_IS_E1H(bp))
6778 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6779 BNX2X_ERR("!!! mf_cfg function disabled\n");
6780 bp->state = BNX2X_STATE_DISABLED;
6781 }
a2fbb9ea 6782
34f80b04
EG
6783 if (bp->state == BNX2X_STATE_OPEN)
6784 for_each_nondefault_queue(bp, i) {
6785 rc = bnx2x_setup_multi(bp, i);
6786 if (rc)
2dfe0e1f 6787 goto load_error3;
34f80b04 6788 }
a2fbb9ea 6789
34f80b04 6790 if (CHIP_IS_E1(bp))
3101c2bc 6791 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6792 else
3101c2bc 6793 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04
EG
6794
6795 if (bp->port.pmf)
6796 bnx2x_initial_phy_init(bp);
a2fbb9ea
ET
6797
6798 /* Start fast path */
34f80b04
EG
6799 switch (load_mode) {
6800 case LOAD_NORMAL:
 6801 /* Tx queues should only be re-enabled */
555f6c78 6802 netif_tx_wake_all_queues(bp->dev);
2dfe0e1f 6803 /* Initialize the receive filter. */
34f80b04
EG
6804 bnx2x_set_rx_mode(bp->dev);
6805 break;
6806
6807 case LOAD_OPEN:
555f6c78 6808 netif_tx_start_all_queues(bp->dev);
2dfe0e1f 6809 /* Initialize the receive filter. */
34f80b04 6810 bnx2x_set_rx_mode(bp->dev);
34f80b04 6811 break;
a2fbb9ea 6812
34f80b04 6813 case LOAD_DIAG:
2dfe0e1f 6814 /* Initialize the receive filter. */
a2fbb9ea 6815 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
6816 bp->state = BNX2X_STATE_DIAG;
6817 break;
6818
6819 default:
6820 break;
a2fbb9ea
ET
6821 }
6822
34f80b04
EG
6823 if (!bp->port.pmf)
6824 bnx2x__link_status_update(bp);
6825
a2fbb9ea
ET
6826 /* start the timer */
6827 mod_timer(&bp->timer, jiffies + bp->current_interval);
6828
34f80b04 6829
a2fbb9ea
ET
6830 return 0;
6831
2dfe0e1f
EG
6832load_error3:
6833 bnx2x_int_disable_sync(bp, 1);
6834 if (!BP_NOMCP(bp)) {
6835 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6836 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6837 }
6838 bp->port.pmf = 0;
7a9b2557
VZ
6839 /* Free SKBs, SGEs, TPA pool and driver internals */
6840 bnx2x_free_skbs(bp);
555f6c78 6841 for_each_rx_queue(bp, i)
3196a88a 6842 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 6843load_error2:
d1014634
YG
6844 /* Release IRQs */
6845 bnx2x_free_irq(bp);
2dfe0e1f
EG
6846load_error1:
6847 bnx2x_napi_disable(bp);
555f6c78 6848 for_each_rx_queue(bp, i)
7cde1c8b 6849 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
6850 bnx2x_free_mem(bp);
6851
 6852 /* TBD: we really need to reset the chip
 6853 if we want to recover from this */
34f80b04 6854 return rc;
a2fbb9ea
ET
6855}
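/* A minimal sketch of the no-MCP load accounting used above and reversed
 * in bnx2x_nic_unload(); the helper name is hypothetical and only
 * identifiers already present in this file are assumed. load_count[0]
 * counts loads on the whole device and load_count[1 + port] counts loads
 * per port, so the first load anywhere must initialize the common blocks
 * and the first load on a port must initialize that port.
 */
static u32 bnx2x_nomcp_load_code(int port)
{
	load_count[0]++;
	load_count[1 + port]++;
	if (load_count[0] == 1)
		return FW_MSG_CODE_DRV_LOAD_COMMON;
	else if (load_count[1 + port] == 1)
		return FW_MSG_CODE_DRV_LOAD_PORT;
	else
		return FW_MSG_CODE_DRV_LOAD_FUNCTION;
}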
6856
6857static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6858{
555f6c78 6859 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
6860 int rc;
6861
c14423fe 6862 /* halt the connection */
555f6c78
EG
6863 fp->state = BNX2X_FP_STATE_HALTING;
6864 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 6865
34f80b04 6866 /* Wait for completion */
a2fbb9ea 6867 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 6868 &(fp->state), 1);
c14423fe 6869 if (rc) /* timeout */
a2fbb9ea
ET
6870 return rc;
6871
6872 /* delete cfc entry */
6873 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6874
34f80b04
EG
6875 /* Wait for completion */
6876 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 6877 &(fp->state), 1);
34f80b04 6878 return rc;
a2fbb9ea
ET
6879}
6880
da5a662a 6881static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6882{
49d66772 6883 u16 dsb_sp_prod_idx;
c14423fe 6884 /* if the other port is handling traffic,
a2fbb9ea 6885 this can take a lot of time */
34f80b04
EG
6886 int cnt = 500;
6887 int rc;
a2fbb9ea
ET
6888
6889 might_sleep();
6890
6891 /* Send HALT ramrod */
6892 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6893 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6894
34f80b04
EG
6895 /* Wait for completion */
6896 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6897 &(bp->fp[0].state), 1);
6898 if (rc) /* timeout */
da5a662a 6899 return rc;
a2fbb9ea 6900
49d66772 6901 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6902
228241eb 6903 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
6904 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6905
49d66772 6906 /* Wait for the completion to arrive on the default status block;
a2fbb9ea
ET
 6907 we are going to reset the chip anyway,
 6908 so there is not much to do if this times out
 6909 */
34f80b04 6910 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
6911 if (!cnt) {
6912 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6913 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6914 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6915#ifdef BNX2X_STOP_ON_ERROR
6916 bnx2x_panic();
da5a662a
VZ
6917#else
6918 rc = -EBUSY;
34f80b04
EG
6919#endif
6920 break;
6921 }
6922 cnt--;
da5a662a 6923 msleep(1);
5650d9d4 6924 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
6925 }
6926 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6927 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
6928
6929 return rc;
a2fbb9ea
ET
6930}
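/* Unlike the HALT ramrod, which completes into bp->fp[0].state, the
 * PORT_DEL completion is visible only as a producer update on the
 * default status block; that is why the loop above snapshots
 * *bp->dsb_sp_prod before posting and then polls it (msleep plus rmb)
 * until it moves or the roughly 500 ms budget runs out.
 */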
6931
34f80b04
EG
6932static void bnx2x_reset_func(struct bnx2x *bp)
6933{
6934 int port = BP_PORT(bp);
6935 int func = BP_FUNC(bp);
6936 int base, i;
6937
6938 /* Configure IGU */
6939 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6940 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6941
34f80b04
EG
6942 /* Clear ILT */
6943 base = FUNC_ILT_BASE(func);
6944 for (i = base; i < base + ILT_PER_FUNC; i++)
6945 bnx2x_ilt_wr(bp, i, 0);
6946}
6947
6948static void bnx2x_reset_port(struct bnx2x *bp)
6949{
6950 int port = BP_PORT(bp);
6951 u32 val;
6952
6953 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6954
6955 /* Do not rcv packets to BRB */
6956 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6957 /* Do not direct rcv packets that are not for MCP to the BRB */
6958 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6959 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6960
6961 /* Configure AEU */
6962 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6963
6964 msleep(100);
6965 /* Check for BRB port occupancy */
6966 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6967 if (val)
6968 DP(NETIF_MSG_IFDOWN,
33471629 6969 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
6970
6971 /* TODO: Close Doorbell port? */
6972}
6973
34f80b04
EG
6974static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6975{
6976 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6977 BP_FUNC(bp), reset_code);
6978
6979 switch (reset_code) {
6980 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6981 bnx2x_reset_port(bp);
6982 bnx2x_reset_func(bp);
6983 bnx2x_reset_common(bp);
6984 break;
6985
6986 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6987 bnx2x_reset_port(bp);
6988 bnx2x_reset_func(bp);
6989 break;
6990
6991 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6992 bnx2x_reset_func(bp);
6993 break;
49d66772 6994
34f80b04
EG
6995 default:
6996 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6997 break;
6998 }
6999}
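/* The three unload codes form nested reset scopes: FUNCTION resets only
 * the per-function resources, PORT also resets the port, and COMMON
 * tears down function, port and the shared blocks, as the cumulative
 * case bodies above show.
 */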
7000
33471629 7001/* must be called with rtnl_lock */
34f80b04 7002static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7003{
da5a662a 7004 int port = BP_PORT(bp);
a2fbb9ea 7005 u32 reset_code = 0;
da5a662a 7006 int i, cnt, rc;
a2fbb9ea
ET
7007
7008 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7009
228241eb
ET
7010 bp->rx_mode = BNX2X_RX_MODE_NONE;
7011 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7012
f8ef6e44 7013 bnx2x_netif_stop(bp, 1);
e94d8af3 7014
34f80b04
EG
7015 del_timer_sync(&bp->timer);
7016 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7017 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7018 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7019
70b9986c
EG
7020 /* Release IRQs */
7021 bnx2x_free_irq(bp);
7022
555f6c78
EG
7023 /* Wait until tx fastpath tasks complete */
7024 for_each_tx_queue(bp, i) {
228241eb
ET
7025 struct bnx2x_fastpath *fp = &bp->fp[i];
7026
34f80b04
EG
7027 cnt = 1000;
7028 smp_rmb();
e8b5fc51 7029 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7030
65abd74d 7031 bnx2x_tx_int(fp, 1000);
34f80b04
EG
7032 if (!cnt) {
7033 BNX2X_ERR("timeout waiting for queue[%d]\n",
7034 i);
7035#ifdef BNX2X_STOP_ON_ERROR
7036 bnx2x_panic();
7037 return -EBUSY;
7038#else
7039 break;
7040#endif
7041 }
7042 cnt--;
da5a662a 7043 msleep(1);
34f80b04
EG
7044 smp_rmb();
7045 }
228241eb 7046 }
da5a662a
VZ
7047 /* Give HW time to discard old tx messages */
7048 msleep(1);
a2fbb9ea 7049
3101c2bc
YG
7050 if (CHIP_IS_E1(bp)) {
7051 struct mac_configuration_cmd *config =
7052 bnx2x_sp(bp, mcast_config);
7053
7054 bnx2x_set_mac_addr_e1(bp, 0);
7055
8d9c5f34 7056 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
7057 CAM_INVALIDATE(config->config_table[i]);
7058
8d9c5f34 7059 config->hdr.length = i;
3101c2bc
YG
7060 if (CHIP_REV_IS_SLOW(bp))
7061 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7062 else
7063 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7064 config->hdr.client_id = BP_CL_ID(bp);
7065 config->hdr.reserved1 = 0;
7066
7067 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7068 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7069 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7070
7071 } else { /* E1H */
65abd74d
YG
7072 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7073
3101c2bc
YG
7074 bnx2x_set_mac_addr_e1h(bp, 0);
7075
7076 for (i = 0; i < MC_HASH_SIZE; i++)
7077 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7078 }
7079
65abd74d
YG
7080 if (unload_mode == UNLOAD_NORMAL)
7081 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7082
7083 else if (bp->flags & NO_WOL_FLAG) {
7084 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7085 if (CHIP_IS_E1H(bp))
7086 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7087
7088 } else if (bp->wol) {
7089 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7090 u8 *mac_addr = bp->dev->dev_addr;
7091 u32 val;
 7092 /* The MAC address is written to entries 1-4 to
 7093 preserve entry 0, which is used by the PMF */
7094 u8 entry = (BP_E1HVN(bp) + 1)*8;
7095
7096 val = (mac_addr[0] << 8) | mac_addr[1];
7097 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7098
7099 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7100 (mac_addr[4] << 8) | mac_addr[5];
7101 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7102
7103 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7104
7105 } else
7106 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7107
34f80b04
EG
 7108 /* Close the multi and leading connections;
 7109 completions for these ramrods are collected synchronously */
a2fbb9ea
ET
7110 for_each_nondefault_queue(bp, i)
7111 if (bnx2x_stop_multi(bp, i))
228241eb 7112 goto unload_error;
a2fbb9ea 7113
da5a662a
VZ
7114 rc = bnx2x_stop_leading(bp);
7115 if (rc) {
34f80b04 7116 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7117#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7118 return -EBUSY;
da5a662a
VZ
7119#else
7120 goto unload_error;
34f80b04 7121#endif
228241eb
ET
7122 }
7123
7124unload_error:
34f80b04 7125 if (!BP_NOMCP(bp))
228241eb 7126 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7127 else {
7128 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
7129 load_count[0], load_count[1], load_count[2]);
7130 load_count[0]--;
da5a662a 7131 load_count[1 + port]--;
34f80b04
EG
7132 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
7133 load_count[0], load_count[1], load_count[2]);
7134 if (load_count[0] == 0)
7135 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7136 else if (load_count[1 + port] == 0)
34f80b04
EG
7137 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7138 else
7139 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7140 }
a2fbb9ea 7141
34f80b04
EG
7142 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7143 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7144 bnx2x__link_reset(bp);
a2fbb9ea
ET
7145
7146 /* Reset the chip */
228241eb 7147 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7148
7149 /* Report UNLOAD_DONE to MCP */
34f80b04 7150 if (!BP_NOMCP(bp))
a2fbb9ea 7151 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9a035440 7152 bp->port.pmf = 0;
a2fbb9ea 7153
7a9b2557 7154 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7155 bnx2x_free_skbs(bp);
555f6c78 7156 for_each_rx_queue(bp, i)
3196a88a 7157 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7158 for_each_rx_queue(bp, i)
7cde1c8b 7159 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7160 bnx2x_free_mem(bp);
7161
7162 bp->state = BNX2X_STATE_CLOSED;
228241eb 7163
a2fbb9ea
ET
7164 netif_carrier_off(bp->dev);
7165
7166 return 0;
7167}
7168
34f80b04
EG
7169static void bnx2x_reset_task(struct work_struct *work)
7170{
7171 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7172
7173#ifdef BNX2X_STOP_ON_ERROR
7174 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7175 " so reset not done to allow debug dump,\n"
7176 KERN_ERR " you will need to reboot when done\n");
7177 return;
7178#endif
7179
7180 rtnl_lock();
7181
7182 if (!netif_running(bp->dev))
7183 goto reset_task_exit;
7184
7185 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7186 bnx2x_nic_load(bp, LOAD_NORMAL);
7187
7188reset_task_exit:
7189 rtnl_unlock();
7190}
7191
a2fbb9ea
ET
7192/* end of nic load/unload */
7193
7194/* ethtool_ops */
7195
7196/*
7197 * Init service functions
7198 */
7199
f1ef27ef
EG
7200static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7201{
7202 switch (func) {
7203 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7204 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7205 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7206 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7207 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7208 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7209 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7210 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7211 default:
7212 BNX2X_ERR("Unsupported function index: %d\n", func);
7213 return (u32)(-1);
7214 }
7215}
7216
7217static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7218{
7219 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7220
7221 /* Flush all outstanding writes */
7222 mmiowb();
7223
7224 /* Pretend to be function 0 */
7225 REG_WR(bp, reg, 0);
7226 /* Flush the GRC transaction (in the chip) */
7227 new_val = REG_RD(bp, reg);
7228 if (new_val != 0) {
7229 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7230 new_val);
7231 BUG();
7232 }
7233
7234 /* From now we are in the "like-E1" mode */
7235 bnx2x_int_disable(bp);
7236
7237 /* Flush all outstanding writes */
7238 mmiowb();
7239
 7240 /* Restore the original function settings */
7241 REG_WR(bp, reg, orig_func);
7242 new_val = REG_RD(bp, reg);
7243 if (new_val != orig_func) {
7244 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7245 orig_func, new_val);
7246 BUG();
7247 }
7248}
7249
7250static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7251{
7252 if (CHIP_IS_E1H(bp))
7253 bnx2x_undi_int_disable_e1h(bp, func);
7254 else
7255 bnx2x_int_disable(bp);
7256}
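/* The "pretend" trick above relies on the per-function PGL pretend
 * register: once a function number is written there, GRC accesses issued
 * by this function are handled as if they came from the pretended
 * function. The read-back after each write flushes the GRC transaction
 * and verifies the register really changed before the code depends on
 * it.
 */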
7257
34f80b04
EG
7258static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7259{
7260 u32 val;
7261
7262 /* Check if there is any driver already loaded */
7263 val = REG_RD(bp, MISC_REG_UNPREPARED);
7264 if (val == 0x1) {
 7265 /* Check if it is the UNDI driver:
 7266 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
 7267 */
4a37fb66 7268 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7269 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7270 if (val == 0x7) {
7271 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7272 /* save our func */
34f80b04 7273 int func = BP_FUNC(bp);
da5a662a
VZ
7274 u32 swap_en;
7275 u32 swap_val;
34f80b04 7276
b4661739
EG
7277 /* clear the UNDI indication */
7278 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7279
34f80b04
EG
7280 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7281
7282 /* try unload UNDI on port 0 */
7283 bp->func = 0;
da5a662a
VZ
7284 bp->fw_seq =
7285 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7286 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7287 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7288
7289 /* if UNDI is loaded on the other port */
7290 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7291
da5a662a
VZ
7292 /* send "DONE" for previous unload */
7293 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7294
7295 /* unload UNDI on port 1 */
34f80b04 7296 bp->func = 1;
da5a662a
VZ
7297 bp->fw_seq =
7298 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7299 DRV_MSG_SEQ_NUMBER_MASK);
7300 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7301
7302 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7303 }
7304
b4661739
EG
7305 /* now it's safe to release the lock */
7306 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7307
f1ef27ef 7308 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
7309
7310 /* close input traffic and wait for it */
7311 /* Do not rcv packets to BRB */
7312 REG_WR(bp,
7313 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7314 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7315 /* Do not direct rcv packets that are not for MCP to
7316 * the BRB */
7317 REG_WR(bp,
7318 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7319 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7320 /* clear AEU */
7321 REG_WR(bp,
7322 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7323 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7324 msleep(10);
7325
7326 /* save NIG port swap info */
7327 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7328 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7329 /* reset device */
7330 REG_WR(bp,
7331 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7332 0xd3ffffff);
34f80b04
EG
7333 REG_WR(bp,
7334 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7335 0x1403);
da5a662a
VZ
7336 /* take the NIG out of reset and restore swap values */
7337 REG_WR(bp,
7338 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7339 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7340 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7341 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7342
7343 /* send unload done to the MCP */
7344 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7345
7346 /* restore our func and fw_seq */
7347 bp->func = func;
7348 bp->fw_seq =
7349 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7350 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7351
7352 } else
7353 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7354 }
7355}
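/* In short: a leftover UNDI (PXE) driver is recognized by its signature
 * CID offset of 0x7 in DORQ_REG_NORM_CID_OFST. The code then
 * impersonates function 0 (and function 1 if the other port holds UNDI)
 * to request an orderly firmware unload, blocks non-MCP traffic into the
 * BRB, hard-resets the device while preserving the NIG port-swap
 * strapping, and finally restores its own func and fw_seq.
 */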
7356
7357static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7358{
7359 u32 val, val2, val3, val4, id;
72ce58c3 7360 u16 pmc;
34f80b04
EG
7361
7362 /* Get the chip revision id and number. */
7363 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7364 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7365 id = ((val & 0xffff) << 16);
7366 val = REG_RD(bp, MISC_REG_CHIP_REV);
7367 id |= ((val & 0xf) << 12);
7368 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7369 id |= ((val & 0xff) << 4);
5a40e08e 7370 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7371 id |= (val & 0xf);
7372 bp->common.chip_id = id;
7373 bp->link_params.chip_id = bp->common.chip_id;
7374 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7375
7376 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7377 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7378 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7379 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7380 bp->common.flash_size, bp->common.flash_size);
7381
7382 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7383 bp->link_params.shmem_base = bp->common.shmem_base;
7384 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7385
7386 if (!bp->common.shmem_base ||
7387 (bp->common.shmem_base < 0xA0000) ||
7388 (bp->common.shmem_base >= 0xC0000)) {
7389 BNX2X_DEV_INFO("MCP not active\n");
7390 bp->flags |= NO_MCP_FLAG;
7391 return;
7392 }
7393
7394 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7395 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7396 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7397 BNX2X_ERR("BAD MCP validity signature\n");
7398
7399 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7400 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7401
7402 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
7403 bp->common.hw_config, bp->common.board);
7404
7405 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7406 SHARED_HW_CFG_LED_MODE_MASK) >>
7407 SHARED_HW_CFG_LED_MODE_SHIFT);
7408
7409 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7410 bp->common.bc_ver = val;
7411 BNX2X_DEV_INFO("bc_ver %X\n", val);
7412 if (val < BNX2X_BC_VER) {
 7413 /* for now only warn;
 7414 * later we might need to enforce this */
7415 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7416 " please upgrade BC\n", BNX2X_BC_VER, val);
7417 }
72ce58c3
EG
7418
7419 if (BP_E1HVN(bp) == 0) {
7420 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7421 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7422 } else {
7423 /* no WOL capability for E1HVN != 0 */
7424 bp->flags |= NO_WOL_FLAG;
7425 }
7426 BNX2X_DEV_INFO("%sWoL capable\n",
7427 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
34f80b04
EG
7428
7429 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7430 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7431 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7432 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7433
7434 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7435 val, val2, val3, val4);
7436}
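/* A hedged sketch of the chip-id packing read at the top of this
 * function; the helper name and the pre-read register values are
 * illustrative only. The layout is chip num in bits 16-31, rev in
 * bits 12-15, metal in bits 4-11 and bond_id in bits 0-3.
 */
static u32 bnx2x_compose_chip_id(u32 num, u32 rev, u32 metal, u32 bond_id)
{
	return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
	       ((metal & 0xff) << 4) | (bond_id & 0xf);
}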
7437
7438static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7439 u32 switch_cfg)
a2fbb9ea 7440{
34f80b04 7441 int port = BP_PORT(bp);
a2fbb9ea
ET
7442 u32 ext_phy_type;
7443
a2fbb9ea
ET
7444 switch (switch_cfg) {
7445 case SWITCH_CFG_1G:
7446 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7447
c18487ee
YR
7448 ext_phy_type =
7449 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7450 switch (ext_phy_type) {
7451 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7452 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7453 ext_phy_type);
7454
34f80b04
EG
7455 bp->port.supported |= (SUPPORTED_10baseT_Half |
7456 SUPPORTED_10baseT_Full |
7457 SUPPORTED_100baseT_Half |
7458 SUPPORTED_100baseT_Full |
7459 SUPPORTED_1000baseT_Full |
7460 SUPPORTED_2500baseX_Full |
7461 SUPPORTED_TP |
7462 SUPPORTED_FIBRE |
7463 SUPPORTED_Autoneg |
7464 SUPPORTED_Pause |
7465 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7466 break;
7467
7468 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7469 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7470 ext_phy_type);
7471
34f80b04
EG
7472 bp->port.supported |= (SUPPORTED_10baseT_Half |
7473 SUPPORTED_10baseT_Full |
7474 SUPPORTED_100baseT_Half |
7475 SUPPORTED_100baseT_Full |
7476 SUPPORTED_1000baseT_Full |
7477 SUPPORTED_TP |
7478 SUPPORTED_FIBRE |
7479 SUPPORTED_Autoneg |
7480 SUPPORTED_Pause |
7481 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7482 break;
7483
7484 default:
7485 BNX2X_ERR("NVRAM config error. "
7486 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7487 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7488 return;
7489 }
7490
34f80b04
EG
7491 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7492 port*0x10);
7493 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7494 break;
7495
7496 case SWITCH_CFG_10G:
7497 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7498
c18487ee
YR
7499 ext_phy_type =
7500 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7501 switch (ext_phy_type) {
7502 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7503 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7504 ext_phy_type);
7505
34f80b04
EG
7506 bp->port.supported |= (SUPPORTED_10baseT_Half |
7507 SUPPORTED_10baseT_Full |
7508 SUPPORTED_100baseT_Half |
7509 SUPPORTED_100baseT_Full |
7510 SUPPORTED_1000baseT_Full |
7511 SUPPORTED_2500baseX_Full |
7512 SUPPORTED_10000baseT_Full |
7513 SUPPORTED_TP |
7514 SUPPORTED_FIBRE |
7515 SUPPORTED_Autoneg |
7516 SUPPORTED_Pause |
7517 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7518 break;
7519
7520 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7521 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7522 ext_phy_type);
f1410647 7523
34f80b04
EG
7524 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7525 SUPPORTED_FIBRE |
7526 SUPPORTED_Pause |
7527 SUPPORTED_Asym_Pause);
f1410647
ET
7528 break;
7529
a2fbb9ea 7530 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7531 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7532 ext_phy_type);
7533
34f80b04
EG
7534 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7535 SUPPORTED_1000baseT_Full |
7536 SUPPORTED_FIBRE |
7537 SUPPORTED_Pause |
7538 SUPPORTED_Asym_Pause);
f1410647
ET
7539 break;
7540
7541 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7542 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7543 ext_phy_type);
7544
34f80b04
EG
7545 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7546 SUPPORTED_1000baseT_Full |
7547 SUPPORTED_FIBRE |
7548 SUPPORTED_Autoneg |
7549 SUPPORTED_Pause |
7550 SUPPORTED_Asym_Pause);
f1410647
ET
7551 break;
7552
c18487ee
YR
7553 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7554 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7555 ext_phy_type);
7556
34f80b04
EG
7557 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7558 SUPPORTED_2500baseX_Full |
7559 SUPPORTED_1000baseT_Full |
7560 SUPPORTED_FIBRE |
7561 SUPPORTED_Autoneg |
7562 SUPPORTED_Pause |
7563 SUPPORTED_Asym_Pause);
c18487ee
YR
7564 break;
7565
f1410647
ET
7566 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7567 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7568 ext_phy_type);
7569
34f80b04
EG
7570 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7571 SUPPORTED_TP |
7572 SUPPORTED_Autoneg |
7573 SUPPORTED_Pause |
7574 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7575 break;
7576
c18487ee
YR
7577 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7578 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7579 bp->link_params.ext_phy_config);
7580 break;
7581
a2fbb9ea
ET
7582 default:
7583 BNX2X_ERR("NVRAM config error. "
7584 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7585 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7586 return;
7587 }
7588
34f80b04
EG
7589 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7590 port*0x18);
7591 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7592
a2fbb9ea
ET
7593 break;
7594
7595 default:
7596 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7597 bp->port.link_config);
a2fbb9ea
ET
7598 return;
7599 }
34f80b04 7600 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7601
7602 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7603 if (!(bp->link_params.speed_cap_mask &
7604 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7605 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7606
c18487ee
YR
7607 if (!(bp->link_params.speed_cap_mask &
7608 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7609 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7610
c18487ee
YR
7611 if (!(bp->link_params.speed_cap_mask &
7612 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7613 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7614
c18487ee
YR
7615 if (!(bp->link_params.speed_cap_mask &
7616 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7617 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7618
c18487ee
YR
7619 if (!(bp->link_params.speed_cap_mask &
7620 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7621 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7622 SUPPORTED_1000baseT_Full);
a2fbb9ea 7623
c18487ee
YR
7624 if (!(bp->link_params.speed_cap_mask &
7625 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7626 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7627
c18487ee
YR
7628 if (!(bp->link_params.speed_cap_mask &
7629 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7630 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7631
34f80b04 7632 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7633}
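/* The function above works in two passes: it first ORs in everything the
 * detected SerDes or XGXS external PHY can do, then masks the result
 * with the NVRAM speed_cap_mask so bp->port.supported never advertises a
 * speed the board configuration has disabled.
 */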
7634
34f80b04 7635static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7636{
c18487ee 7637 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7638
34f80b04 7639 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7640 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7641 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7642 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7643 bp->port.advertising = bp->port.supported;
a2fbb9ea 7644 } else {
c18487ee
YR
7645 u32 ext_phy_type =
7646 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7647
7648 if ((ext_phy_type ==
7649 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7650 (ext_phy_type ==
7651 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7652 /* force 10G, no AN */
c18487ee 7653 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7654 bp->port.advertising =
a2fbb9ea
ET
7655 (ADVERTISED_10000baseT_Full |
7656 ADVERTISED_FIBRE);
7657 break;
7658 }
7659 BNX2X_ERR("NVRAM config error. "
7660 "Invalid link_config 0x%x"
7661 " Autoneg not supported\n",
34f80b04 7662 bp->port.link_config);
a2fbb9ea
ET
7663 return;
7664 }
7665 break;
7666
7667 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7668 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7669 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7670 bp->port.advertising = (ADVERTISED_10baseT_Full |
7671 ADVERTISED_TP);
a2fbb9ea
ET
7672 } else {
7673 BNX2X_ERR("NVRAM config error. "
7674 "Invalid link_config 0x%x"
7675 " speed_cap_mask 0x%x\n",
34f80b04 7676 bp->port.link_config,
c18487ee 7677 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7678 return;
7679 }
7680 break;
7681
7682 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7683 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7684 bp->link_params.req_line_speed = SPEED_10;
7685 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7686 bp->port.advertising = (ADVERTISED_10baseT_Half |
7687 ADVERTISED_TP);
a2fbb9ea
ET
7688 } else {
7689 BNX2X_ERR("NVRAM config error. "
7690 "Invalid link_config 0x%x"
7691 " speed_cap_mask 0x%x\n",
34f80b04 7692 bp->port.link_config,
c18487ee 7693 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7694 return;
7695 }
7696 break;
7697
7698 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7699 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7700 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7701 bp->port.advertising = (ADVERTISED_100baseT_Full |
7702 ADVERTISED_TP);
a2fbb9ea
ET
7703 } else {
7704 BNX2X_ERR("NVRAM config error. "
7705 "Invalid link_config 0x%x"
7706 " speed_cap_mask 0x%x\n",
34f80b04 7707 bp->port.link_config,
c18487ee 7708 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7709 return;
7710 }
7711 break;
7712
7713 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7714 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7715 bp->link_params.req_line_speed = SPEED_100;
7716 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7717 bp->port.advertising = (ADVERTISED_100baseT_Half |
7718 ADVERTISED_TP);
a2fbb9ea
ET
7719 } else {
7720 BNX2X_ERR("NVRAM config error. "
7721 "Invalid link_config 0x%x"
7722 " speed_cap_mask 0x%x\n",
34f80b04 7723 bp->port.link_config,
c18487ee 7724 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7725 return;
7726 }
7727 break;
7728
7729 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7730 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7731 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7732 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7733 ADVERTISED_TP);
a2fbb9ea
ET
7734 } else {
7735 BNX2X_ERR("NVRAM config error. "
7736 "Invalid link_config 0x%x"
7737 " speed_cap_mask 0x%x\n",
34f80b04 7738 bp->port.link_config,
c18487ee 7739 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7740 return;
7741 }
7742 break;
7743
7744 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7745 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7746 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7747 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7748 ADVERTISED_TP);
a2fbb9ea
ET
7749 } else {
7750 BNX2X_ERR("NVRAM config error. "
7751 "Invalid link_config 0x%x"
7752 " speed_cap_mask 0x%x\n",
34f80b04 7753 bp->port.link_config,
c18487ee 7754 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7755 return;
7756 }
7757 break;
7758
7759 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7760 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7761 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7762 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7763 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7764 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7765 ADVERTISED_FIBRE);
a2fbb9ea
ET
7766 } else {
7767 BNX2X_ERR("NVRAM config error. "
7768 "Invalid link_config 0x%x"
7769 " speed_cap_mask 0x%x\n",
34f80b04 7770 bp->port.link_config,
c18487ee 7771 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7772 return;
7773 }
7774 break;
7775
7776 default:
7777 BNX2X_ERR("NVRAM config error. "
7778 "BAD link speed link_config 0x%x\n",
34f80b04 7779 bp->port.link_config);
c18487ee 7780 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7781 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7782 break;
7783 }
a2fbb9ea 7784
34f80b04
EG
7785 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7786 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7787 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7788 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7789 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 7790
c18487ee 7791 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7792 " advertising 0x%x\n",
c18487ee
YR
7793 bp->link_params.req_line_speed,
7794 bp->link_params.req_duplex,
34f80b04 7795 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7796}
7797
34f80b04 7798static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7799{
34f80b04
EG
7800 int port = BP_PORT(bp);
7801 u32 val, val2;
a2fbb9ea 7802
c18487ee 7803 bp->link_params.bp = bp;
34f80b04 7804 bp->link_params.port = port;
c18487ee 7805
c18487ee 7806 bp->link_params.serdes_config =
f1410647 7807 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7808 bp->link_params.lane_config =
a2fbb9ea 7809 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7810 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7811 SHMEM_RD(bp,
7812 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7813 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7814 SHMEM_RD(bp,
7815 dev_info.port_hw_config[port].speed_capability_mask);
7816
34f80b04 7817 bp->port.link_config =
a2fbb9ea
ET
7818 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7819
34f80b04
EG
7820 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7821 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7822 " link_config 0x%08x\n",
c18487ee
YR
7823 bp->link_params.serdes_config,
7824 bp->link_params.lane_config,
7825 bp->link_params.ext_phy_config,
34f80b04 7826 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7827
34f80b04 7828 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7829 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7830 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7831
7832 bnx2x_link_settings_requested(bp);
7833
7834 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7835 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7836 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7837 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7838 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7839 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7840 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7841 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
7842 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7843 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7844}
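/* A minimal sketch of the MAC extraction above, using a hypothetical
 * helper name: shmem stores the station address as two 32-bit words,
 * mac_upper carrying bytes 0-1 in its low 16 bits and mac_lower carrying
 * bytes 2-5. The same unpacking is repeated for the E1HMF MAC in
 * bnx2x_get_hwinfo() below.
 */
static void bnx2x_mac_from_shmem(u32 upper, u32 lower, u8 *mac)
{
	mac[0] = (u8)(upper >> 8);
	mac[1] = (u8)(upper & 0xff);
	mac[2] = (u8)(lower >> 24);
	mac[3] = (u8)(lower >> 16);
	mac[4] = (u8)(lower >> 8);
	mac[5] = (u8)(lower & 0xff);
}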
7845
7846static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7847{
7848 int func = BP_FUNC(bp);
7849 u32 val, val2;
7850 int rc = 0;
a2fbb9ea 7851
34f80b04 7852 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7853
34f80b04
EG
7854 bp->e1hov = 0;
7855 bp->e1hmf = 0;
7856 if (CHIP_IS_E1H(bp)) {
7857 bp->mf_config =
7858 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7859
3196a88a
EG
7860 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7861 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 7862 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7863
34f80b04
EG
7864 bp->e1hov = val;
7865 bp->e1hmf = 1;
7866 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7867 "(0x%04x)\n",
7868 func, bp->e1hov, bp->e1hov);
7869 } else {
7870 BNX2X_DEV_INFO("Single function mode\n");
7871 if (BP_E1HVN(bp)) {
7872 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7873 " aborting\n", func);
7874 rc = -EPERM;
7875 }
7876 }
7877 }
a2fbb9ea 7878
34f80b04
EG
7879 if (!BP_NOMCP(bp)) {
7880 bnx2x_get_port_hwinfo(bp);
7881
7882 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7883 DRV_MSG_SEQ_NUMBER_MASK);
7884 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7885 }
7886
7887 if (IS_E1HMF(bp)) {
7888 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7889 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7890 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7891 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7892 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7893 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7894 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7895 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7896 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7897 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7898 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7899 ETH_ALEN);
7900 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7901 ETH_ALEN);
a2fbb9ea 7902 }
34f80b04
EG
7903
7904 return rc;
a2fbb9ea
ET
7905 }
7906
34f80b04
EG
7907 if (BP_NOMCP(bp)) {
7908 /* only supposed to happen on emulation/FPGA */
33471629 7909 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
7910 random_ether_addr(bp->dev->dev_addr);
7911 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7912 }
a2fbb9ea 7913
34f80b04
EG
7914 return rc;
7915}
7916
7917static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7918{
7919 int func = BP_FUNC(bp);
7920 int rc;
7921
da5a662a
VZ
7922 /* Disable interrupt handling until HW is initialized */
7923 atomic_set(&bp->intr_sem, 1);
7924
34f80b04 7925 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7926
1cf167f2 7927 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
7928 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7929
7930 rc = bnx2x_get_hwinfo(bp);
7931
7932 /* need to reset chip if undi was active */
7933 if (!BP_NOMCP(bp))
7934 bnx2x_undi_unload(bp);
7935
7936 if (CHIP_REV_IS_FPGA(bp))
7937 printk(KERN_ERR PFX "FPGA detected\n");
7938
7939 if (BP_NOMCP(bp) && (func == 0))
7940 printk(KERN_ERR PFX
7941 "MCP disabled, must load devices in order!\n");
7942
555f6c78 7943 /* Set multi queue mode */
8badd27a
EG
7944 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
7945 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 7946 printk(KERN_ERR PFX
8badd27a 7947 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
7948 multi_mode = ETH_RSS_MODE_DISABLED;
7949 }
7950 bp->multi_mode = multi_mode;
7951
7952
7a9b2557
VZ
7953 /* Set TPA flags */
7954 if (disable_tpa) {
7955 bp->flags &= ~TPA_ENABLE_FLAG;
7956 bp->dev->features &= ~NETIF_F_LRO;
7957 } else {
7958 bp->flags |= TPA_ENABLE_FLAG;
7959 bp->dev->features |= NETIF_F_LRO;
7960 }
7961
7962
34f80b04
EG
7963 bp->tx_ring_size = MAX_TX_AVAIL;
7964 bp->rx_ring_size = MAX_RX_AVAIL;
7965
7966 bp->rx_csum = 1;
7967 bp->rx_offset = 0;
7968
7969 bp->tx_ticks = 50;
7970 bp->rx_ticks = 25;
7971
34f80b04
EG
7972 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7973 bp->current_interval = (poll ? poll : bp->timer_interval);
7974
7975 init_timer(&bp->timer);
7976 bp->timer.expires = jiffies + bp->current_interval;
7977 bp->timer.data = (unsigned long) bp;
7978 bp->timer.function = bnx2x_timer;
7979
7980 return rc;
a2fbb9ea
ET
7981}
7982
7983/*
7984 * ethtool service functions
7985 */
7986
7987/* All ethtool functions called with rtnl_lock */
7988
7989static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7990{
7991 struct bnx2x *bp = netdev_priv(dev);
7992
34f80b04
EG
7993 cmd->supported = bp->port.supported;
7994 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7995
7996 if (netif_carrier_ok(dev)) {
c18487ee
YR
7997 cmd->speed = bp->link_vars.line_speed;
7998 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7999 } else {
c18487ee
YR
8000 cmd->speed = bp->link_params.req_line_speed;
8001 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8002 }
34f80b04
EG
8003 if (IS_E1HMF(bp)) {
8004 u16 vn_max_rate;
8005
8006 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8007 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8008 if (vn_max_rate < cmd->speed)
8009 cmd->speed = vn_max_rate;
8010 }
a2fbb9ea 8011
c18487ee
YR
8012 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8013 u32 ext_phy_type =
8014 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
8015
8016 switch (ext_phy_type) {
8017 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8018 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8019 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8020 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8021 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
8022 cmd->port = PORT_FIBRE;
8023 break;
8024
8025 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8026 cmd->port = PORT_TP;
8027 break;
8028
c18487ee
YR
8029 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8030 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8031 bp->link_params.ext_phy_config);
8032 break;
8033
f1410647
ET
8034 default:
8035 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
8036 bp->link_params.ext_phy_config);
8037 break;
f1410647
ET
8038 }
8039 } else
a2fbb9ea 8040 cmd->port = PORT_TP;
a2fbb9ea 8041
34f80b04 8042 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
8043 cmd->transceiver = XCVR_INTERNAL;
8044
c18487ee 8045 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8046 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8047 else
a2fbb9ea 8048 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
8049
8050 cmd->maxtxpkt = 0;
8051 cmd->maxrxpkt = 0;
8052
8053 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8054 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8055 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8056 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8057 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8058 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8059 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8060
8061 return 0;
8062}
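/* In multi-function (E1HMF) mode the reported speed is additionally
 * clamped by the per-function bandwidth limit: FUNC_MF_CFG_MAX_BW is
 * stored in units of 100 Mbps, hence the "* 100" when vn_max_rate is
 * computed above.
 */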
8063
8064static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8065{
8066 struct bnx2x *bp = netdev_priv(dev);
8067 u32 advertising;
8068
34f80b04
EG
8069 if (IS_E1HMF(bp))
8070 return 0;
8071
a2fbb9ea
ET
8072 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8073 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8074 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8075 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8076 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8077 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8078 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8079
a2fbb9ea 8080 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8081 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8082 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8083 return -EINVAL;
f1410647 8084 }
a2fbb9ea
ET
8085
8086 /* advertise the requested speed and duplex if supported */
34f80b04 8087 cmd->advertising &= bp->port.supported;
a2fbb9ea 8088
c18487ee
YR
8089 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8090 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8091 bp->port.advertising |= (ADVERTISED_Autoneg |
8092 cmd->advertising);
a2fbb9ea
ET
8093
8094 } else { /* forced speed */
8095 /* advertise the requested speed and duplex if supported */
8096 switch (cmd->speed) {
8097 case SPEED_10:
8098 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8099 if (!(bp->port.supported &
f1410647
ET
8100 SUPPORTED_10baseT_Full)) {
8101 DP(NETIF_MSG_LINK,
8102 "10M full not supported\n");
a2fbb9ea 8103 return -EINVAL;
f1410647 8104 }
a2fbb9ea
ET
8105
8106 advertising = (ADVERTISED_10baseT_Full |
8107 ADVERTISED_TP);
8108 } else {
34f80b04 8109 if (!(bp->port.supported &
f1410647
ET
8110 SUPPORTED_10baseT_Half)) {
8111 DP(NETIF_MSG_LINK,
8112 "10M half not supported\n");
a2fbb9ea 8113 return -EINVAL;
f1410647 8114 }
a2fbb9ea
ET
8115
8116 advertising = (ADVERTISED_10baseT_Half |
8117 ADVERTISED_TP);
8118 }
8119 break;
8120
8121 case SPEED_100:
8122 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8123 if (!(bp->port.supported &
f1410647
ET
8124 SUPPORTED_100baseT_Full)) {
8125 DP(NETIF_MSG_LINK,
8126 "100M full not supported\n");
a2fbb9ea 8127 return -EINVAL;
f1410647 8128 }
a2fbb9ea
ET
8129
8130 advertising = (ADVERTISED_100baseT_Full |
8131 ADVERTISED_TP);
8132 } else {
34f80b04 8133 if (!(bp->port.supported &
f1410647
ET
8134 SUPPORTED_100baseT_Half)) {
8135 DP(NETIF_MSG_LINK,
8136 "100M half not supported\n");
a2fbb9ea 8137 return -EINVAL;
f1410647 8138 }
a2fbb9ea
ET
8139
8140 advertising = (ADVERTISED_100baseT_Half |
8141 ADVERTISED_TP);
8142 }
8143 break;
8144
8145 case SPEED_1000:
f1410647
ET
8146 if (cmd->duplex != DUPLEX_FULL) {
8147 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8148 return -EINVAL;
f1410647 8149 }
a2fbb9ea 8150
34f80b04 8151 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8152 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8153 return -EINVAL;
f1410647 8154 }
a2fbb9ea
ET
8155
8156 advertising = (ADVERTISED_1000baseT_Full |
8157 ADVERTISED_TP);
8158 break;
8159
8160 case SPEED_2500:
f1410647
ET
8161 if (cmd->duplex != DUPLEX_FULL) {
8162 DP(NETIF_MSG_LINK,
8163 "2.5G half not supported\n");
a2fbb9ea 8164 return -EINVAL;
f1410647 8165 }
a2fbb9ea 8166
34f80b04 8167 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8168 DP(NETIF_MSG_LINK,
8169 "2.5G full not supported\n");
a2fbb9ea 8170 return -EINVAL;
f1410647 8171 }
a2fbb9ea 8172
f1410647 8173 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8174 ADVERTISED_TP);
8175 break;
8176
8177 case SPEED_10000:
f1410647
ET
8178 if (cmd->duplex != DUPLEX_FULL) {
8179 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8180 return -EINVAL;
f1410647 8181 }
a2fbb9ea 8182
34f80b04 8183 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8184 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8185 return -EINVAL;
f1410647 8186 }
a2fbb9ea
ET
8187
8188 advertising = (ADVERTISED_10000baseT_Full |
8189 ADVERTISED_FIBRE);
8190 break;
8191
8192 default:
f1410647 8193 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8194 return -EINVAL;
8195 }
8196
c18487ee
YR
8197 bp->link_params.req_line_speed = cmd->speed;
8198 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8199 bp->port.advertising = advertising;
a2fbb9ea
ET
8200 }
8201
c18487ee 8202 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8203 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8204 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8205 bp->port.advertising);
a2fbb9ea 8206
34f80b04 8207 if (netif_running(dev)) {
bb2a0f7a 8208 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8209 bnx2x_link_set(bp);
8210 }
a2fbb9ea
ET
8211
8212 return 0;
8213}
8214
c18487ee
YR
8215#define PHY_FW_VER_LEN 10
8216
a2fbb9ea
ET
8217static void bnx2x_get_drvinfo(struct net_device *dev,
8218 struct ethtool_drvinfo *info)
8219{
8220 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8221 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8222
8223 strcpy(info->driver, DRV_MODULE_NAME);
8224 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8225
8226 phy_fw_ver[0] = '\0';
34f80b04 8227 if (bp->port.pmf) {
4a37fb66 8228 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8229 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8230 (bp->state != BNX2X_STATE_CLOSED),
8231 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8232 bnx2x_release_phy_lock(bp);
34f80b04 8233 }
c18487ee 8234
f0e53a84
EG
8235 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8236 (bp->common.bc_ver & 0xff0000) >> 16,
8237 (bp->common.bc_ver & 0xff00) >> 8,
8238 (bp->common.bc_ver & 0xff),
8239 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8240 strcpy(info->bus_info, pci_name(bp->pdev));
8241 info->n_stats = BNX2X_NUM_STATS;
8242 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8243 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8244 info->regdump_len = 0;
8245}
8246
8247static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8248{
8249 struct bnx2x *bp = netdev_priv(dev);
8250
8251 if (bp->flags & NO_WOL_FLAG) {
8252 wol->supported = 0;
8253 wol->wolopts = 0;
8254 } else {
8255 wol->supported = WAKE_MAGIC;
8256 if (bp->wol)
8257 wol->wolopts = WAKE_MAGIC;
8258 else
8259 wol->wolopts = 0;
8260 }
8261 memset(&wol->sopass, 0, sizeof(wol->sopass));
8262}
8263
8264static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8265{
8266 struct bnx2x *bp = netdev_priv(dev);
8267
8268 if (wol->wolopts & ~WAKE_MAGIC)
8269 return -EINVAL;
8270
8271 if (wol->wolopts & WAKE_MAGIC) {
8272 if (bp->flags & NO_WOL_FLAG)
8273 return -EINVAL;
8274
8275 bp->wol = 1;
34f80b04 8276 } else
a2fbb9ea 8277 bp->wol = 0;
34f80b04 8278
a2fbb9ea
ET
8279 return 0;
8280}
8281
8282static u32 bnx2x_get_msglevel(struct net_device *dev)
8283{
8284 struct bnx2x *bp = netdev_priv(dev);
8285
8286 return bp->msglevel;
8287}
8288
8289static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8290{
8291 struct bnx2x *bp = netdev_priv(dev);
8292
8293 if (capable(CAP_NET_ADMIN))
8294 bp->msglevel = level;
8295}
8296
8297static int bnx2x_nway_reset(struct net_device *dev)
8298{
8299 struct bnx2x *bp = netdev_priv(dev);
8300
34f80b04
EG
8301 if (!bp->port.pmf)
8302 return 0;
a2fbb9ea 8303
34f80b04 8304 if (netif_running(dev)) {
bb2a0f7a 8305 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8306 bnx2x_link_set(bp);
8307 }
a2fbb9ea
ET
8308
8309 return 0;
8310}
8311
8312static int bnx2x_get_eeprom_len(struct net_device *dev)
8313{
8314 struct bnx2x *bp = netdev_priv(dev);
8315
34f80b04 8316 return bp->common.flash_size;
a2fbb9ea
ET
8317}
8318
8319static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8320{
34f80b04 8321 int port = BP_PORT(bp);
a2fbb9ea
ET
8322 int count, i;
8323 u32 val = 0;
8324
8325 /* adjust timeout for emulation/FPGA */
8326 count = NVRAM_TIMEOUT_COUNT;
8327 if (CHIP_REV_IS_SLOW(bp))
8328 count *= 100;
8329
8330 /* request access to nvram interface */
8331 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8332 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8333
8334 for (i = 0; i < count*10; i++) {
8335 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8336 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8337 break;
8338
8339 udelay(5);
8340 }
8341
8342 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8343 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
8344 return -EBUSY;
8345 }
8346
8347 return 0;
8348}
8349
8350static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8351{
34f80b04 8352 int port = BP_PORT(bp);
a2fbb9ea
ET
8353 int count, i;
8354 u32 val = 0;
8355
8356 /* adjust timeout for emulation/FPGA */
8357 count = NVRAM_TIMEOUT_COUNT;
8358 if (CHIP_REV_IS_SLOW(bp))
8359 count *= 100;
8360
8361 /* relinquish nvram interface */
8362 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8363 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8364
8365 for (i = 0; i < count*10; i++) {
8366 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8367 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8368 break;
8369
8370 udelay(5);
8371 }
8372
8373 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8374 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
8375 return -EBUSY;
8376 }
8377
8378 return 0;
8379}
8380
8381static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8382{
8383 u32 val;
8384
8385 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8386
8387 /* enable both bits, even on read */
8388 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8389 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8390 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8391}
8392
8393static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8394{
8395 u32 val;
8396
8397 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8398
8399 /* disable both bits, even after read */
8400 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8401 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8402 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8403}
8404
8405static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8406 u32 cmd_flags)
8407{
f1410647 8408 int count, i, rc;
a2fbb9ea
ET
8409 u32 val;
8410
8411 /* build the command word */
8412 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8413
8414 /* need to clear DONE bit separately */
8415 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8416
8417 /* address of the NVRAM to read from */
8418 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8419 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8420
8421 /* issue a read command */
8422 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8423
8424 /* adjust timeout for emulation/FPGA */
8425 count = NVRAM_TIMEOUT_COUNT;
8426 if (CHIP_REV_IS_SLOW(bp))
8427 count *= 100;
8428
8429 /* wait for completion */
8430 *ret_val = 0;
8431 rc = -EBUSY;
8432 for (i = 0; i < count; i++) {
8433 udelay(5);
8434 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8435
8436 if (val & MCPR_NVM_COMMAND_DONE) {
8437 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
 8438 /* we read nvram data in cpu order,
 8439 * but ethtool sees it as an array of bytes;
 8440 * converting to big-endian does the work */
8441 val = cpu_to_be32(val);
8442 *ret_val = val;
8443 rc = 0;
8444 break;
8445 }
8446 }
8447
8448 return rc;
8449}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
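
/* The FIRST/LAST command flags frame a burst on the NVM interface: the
 * first dword of a transfer carries MCPR_NVM_COMMAND_FIRST, intermediate
 * dwords carry no flag, and the final dword carries MCPR_NVM_COMMAND_LAST,
 * which is why the loop above reads all but the last word and the tail
 * read outside the loop adds the LAST flag. */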

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	u32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes,
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
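
/* A single-byte update is thus a read-modify-write of the enclosing
 * aligned dword: the dword is read back in byte-array (big-endian)
 * order, the target byte is masked in via BYTE_OFFSET(), and the
 * whole dword is written back in one FIRST|LAST burst. */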

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
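
/* Besides flagging the very first and very last dword of the buffer,
 * the loop above also restarts the FIRST/LAST framing at every
 * NVRAM_PAGE_SIZE boundary: the dword that ends a flash page gets
 * MCPR_NVM_COMMAND_LAST and the dword that starts the next page gets
 * MCPR_NVM_COMMAND_FIRST, so a long write is issued page by page. */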

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}
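
/* Note the asymmetric clamps above: Rx is capped at decimal 3000 ticks
 * while Tx is capped at hex 0x3000 (12288). The mismatch looks
 * unintentional, so treat the exact ceilings with caution. */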

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
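
/* Ring sizes only take effect at load time, so a resize on a running
 * interface is implemented as a full unload/reload cycle. The Tx lower
 * bound of MAX_SKB_FRAGS + 4 presumably leaves room for a maximally
 * fragmented skb plus its start, parsing and split BDs. */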

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA when Rx CSUM is disabled; otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   first by writing 0x00000000, then by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify the value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
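
/* In reg_tbl, offset0 is the port-0 register address and offset1 is the
 * per-port stride, so offset0 + port*offset1 addresses the same register
 * on either port; mask selects only the implemented bits, since the test
 * writes 0x00000000 and 0xffffffff and expects the masked bits to stick. */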

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);

	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
		u16 cnt = 1000;
		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		/* wait until link state is restored */
		if (link_up)
			while (cnt-- && bnx2x_test_link(&bp->link_params,
							&bp->link_vars))
				msleep(10);
	} else
		return -EINVAL;

	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
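
/* The loopback test, in short: build a 1514-byte frame addressed to
 * ourselves with a known byte pattern, post it on queue 0 with a single
 * start+end BD, ring the doorbell, then verify that exactly one packet
 * was consumed on Tx, that one fast-path CQE with no error flags arrived
 * on Rx, and that the received payload matches the pattern byte for byte. */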

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	u32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
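
/* Each NVRAM region in nvram_tbl carries its own CRC-32 appended at the
 * end, so running ether_crc_le() over the whole region (data plus
 * embedded CRC) must yield the well-known CRC-32 residue 0xdebb20e3;
 * any other result means the region is corrupt. */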

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
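
/* The interrupt test piggybacks on the slowpath: it posts an empty
 * SET_MAC ramrod and waits up to 100 ms for the completion interrupt
 * to clear set_mac_pending; if the completion never arrives, interrupt
 * delivery is assumed broken and -ENODEV is returned. */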

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
	(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
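
/* The adjustment above skips the last entry of an RCQ page: when the
 * consumer index from the status block lands on a page boundary it is
 * bumped past what is (presumably) the "next page" link element, so it
 * can be compared directly against the software rx_comp_cons, which is
 * advanced the same way by NEXT_RCQ_IDX(). */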

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp, budget);

	if (bnx2x_has_rx_work(fp))
		work_done = bnx2x_rx_int(fp, budget);
	rmb(); /* BNX2X_HAS_WORK() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}


/* we split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first, fix up the first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping;
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
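
/* bnx2x_csum_fix adjusts a checksum whose coverage starts 'fix' bytes
 * away from the transport header: a positive fix subtracts the partial
 * sum of the bytes before the header, a negative fix adds the missing
 * bytes back, and the result is byte-swapped into the order the parsing
 * BD expects. */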

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == ntohs(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}

		} else {
			/* a non-LSO packet this fragmented must always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
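
/* The window check above enforces a FW rule: every MSS-worth of LSO
 * payload must be fetchable from at most (MAX_FETCH_BD - 3) BDs. It
 * slides a window of wnd_size fragments across the skb and flags the
 * packet for linearization as soon as any window sums to less than one
 * MSS, since that window would need more BDs than the FW can fetch. */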
755735eb
EG
10007
10008/* called with netif_tx_lock
a2fbb9ea 10009 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10010 * netif_wake_queue()
a2fbb9ea
ET
10011 */
10012static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10013{
10014 struct bnx2x *bp = netdev_priv(dev);
10015 struct bnx2x_fastpath *fp;
555f6c78 10016 struct netdev_queue *txq;
a2fbb9ea
ET
10017 struct sw_tx_bd *tx_buf;
10018 struct eth_tx_bd *tx_bd;
10019 struct eth_tx_parse_bd *pbd = NULL;
10020 u16 pkt_prod, bd_prod;
755735eb 10021 int nbd, fp_index;
a2fbb9ea 10022 dma_addr_t mapping;
755735eb
EG
10023 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10024 int vlan_off = (bp->e1hov ? 4 : 0);
10025 int i;
10026 u8 hlen = 0;
a2fbb9ea
ET
10027
10028#ifdef BNX2X_STOP_ON_ERROR
10029 if (unlikely(bp->panic))
10030 return NETDEV_TX_BUSY;
10031#endif
10032
555f6c78
EG
10033 fp_index = skb_get_queue_mapping(skb);
10034 txq = netdev_get_tx_queue(dev, fp_index);
10035
a2fbb9ea 10036 fp = &bp->fp[fp_index];
755735eb 10037
231fd58a 10038 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
de832a55 10039 fp->eth_q_stats.driver_xoff++,
555f6c78 10040 netif_tx_stop_queue(txq);
a2fbb9ea
ET
10041 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10042 return NETDEV_TX_BUSY;
10043 }
10044
755735eb
EG
10045 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10046 " gso type %x xmit_type %x\n",
10047 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10048 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10049
632da4d6 10050#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
33471629 10051 /* First, check if we need to linearize the skb
755735eb
EG
10052 (due to FW restrictions) */
10053 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10054 /* Statistics of linearization */
10055 bp->lin_cnt++;
10056 if (skb_linearize(skb) != 0) {
10057 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10058 "silently dropping this SKB\n");
10059 dev_kfree_skb_any(skb);
da5a662a 10060 return NETDEV_TX_OK;
755735eb
EG
10061 }
10062 }
632da4d6 10063#endif
755735eb 10064
a2fbb9ea 10065 /*
755735eb 10066 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 10067 then for TSO or xsum we have a parsing info BD,
755735eb 10068 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
10069 (don't forget to mark the last one as last,
10070 and to unmap only AFTER you write to the BD ...)
755735eb 10071 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
10072 */
10073
10074 pkt_prod = fp->tx_pkt_prod++;
755735eb 10075 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10076
755735eb 10077 /* get a tx_buf and first BD */
a2fbb9ea
ET
10078 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10079 tx_bd = &fp->tx_desc_ring[bd_prod];
10080
10081 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10082 tx_bd->general_data = (UNICAST_ADDRESS <<
10083 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a
EG
10084 /* header nbd */
10085 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10086
755735eb
EG
10087 /* remember the first BD of the packet */
10088 tx_buf->first_bd = fp->tx_bd_prod;
10089 tx_buf->skb = skb;
a2fbb9ea
ET
10090
10091 DP(NETIF_MSG_TX_QUEUED,
10092 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10093 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10094
0c6671b0
EG
10095#ifdef BCM_VLAN
10096 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10097 (bp->flags & HW_VLAN_TX_FLAG)) {
755735eb
EG
10098 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10099 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10100 vlan_off += 4;
10101 } else
0c6671b0 10102#endif
755735eb 10103 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10104
755735eb 10105 if (xmit_type) {
755735eb 10106 /* turn on parsing and get a BD */
a2fbb9ea
ET
10107 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10108 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
10109
10110 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10111 }
10112
10113 if (xmit_type & XMIT_CSUM) {
10114 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
10115
10116 /* for now NS flag is not used in Linux */
755735eb 10117 pbd->global_data = (hlen |
96fc1784 10118 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 10119 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 10120
755735eb
EG
10121 pbd->ip_hlen = (skb_transport_header(skb) -
10122 skb_network_header(skb)) / 2;
10123
10124 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 10125
755735eb
EG
10126 pbd->total_hlen = cpu_to_le16(hlen);
10127 hlen = hlen*2 - vlan_off;
a2fbb9ea 10128
755735eb
EG
10129 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10130
10131 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 10132 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
10133 ETH_TX_BD_FLAGS_IP_CSUM;
10134 else
10135 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10136
10137 if (xmit_type & XMIT_CSUM_TCP) {
10138 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10139
10140 } else {
10141 s8 fix = SKB_CS_OFF(skb); /* signed! */
10142
a2fbb9ea 10143 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 10144 pbd->cs_offset = fix / 2;
a2fbb9ea 10145
755735eb
EG
10146 DP(NETIF_MSG_TX_QUEUED,
10147 "hlen %d offset %d fix %d csum before fix %x\n",
10148 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10149 SKB_CS(skb));
10150
10151 /* HW bug: fixup the CSUM */
10152 pbd->tcp_pseudo_csum =
10153 bnx2x_csum_fix(skb_transport_header(skb),
10154 SKB_CS(skb), fix);
10155
10156 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10157 pbd->tcp_pseudo_csum);
10158 }
a2fbb9ea
ET
10159 }
10160
10161 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 10162 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
10163
10164 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10165 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 10166 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
a2fbb9ea
ET
10167 tx_bd->nbd = cpu_to_le16(nbd);
10168 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10169
10170 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
10171 " nbytes %d flags %x vlan %x\n",
10172 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10173 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10174 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 10175
755735eb 10176 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
10177
10178 DP(NETIF_MSG_TX_QUEUED,
10179 "TSO packet len %d hlen %d total len %d tso size %d\n",
10180 skb->len, hlen, skb_headlen(skb),
10181 skb_shinfo(skb)->gso_size);
10182
10183 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10184
755735eb
EG
10185 if (unlikely(skb_headlen(skb) > hlen))
10186 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10187 bd_prod, ++nbd);
a2fbb9ea
ET
10188
10189 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10190 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
10191 pbd->tcp_flags = pbd_tcp_flags(skb);
10192
10193 if (xmit_type & XMIT_GSO_V4) {
10194 pbd->ip_id = swab16(ip_hdr(skb)->id);
10195 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
10196 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10197 ip_hdr(skb)->daddr,
10198 0, IPPROTO_TCP, 0));
755735eb
EG
10199
10200 } else
10201 pbd->tcp_pseudo_csum =
10202 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10203 &ipv6_hdr(skb)->daddr,
10204 0, IPPROTO_TCP, 0));
10205
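 /* The pseudo checksums above are deliberately seeded with a zero
  * length; this flag tells the chip so, and each generated segment's
  * length is folded in when its checksum is computed.
  */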
a2fbb9ea
ET
10206 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10207 }
10208
755735eb
EG
10209 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10210 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 10211
755735eb
EG
10212 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10213 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 10214
755735eb
EG
10215 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10216 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 10217
755735eb
EG
10218 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10219 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10220 tx_bd->nbytes = cpu_to_le16(frag->size);
10221 tx_bd->vlan = cpu_to_le16(pkt_prod);
10222 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 10223
755735eb
EG
10224 DP(NETIF_MSG_TX_QUEUED,
10225 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10226 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10227 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
10228 }
10229
755735eb 10230 /* mark the last BD as the end of this packet's BD chain */
a2fbb9ea
ET
10231 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10232
10233 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10234 tx_bd, tx_bd->bd_flags.as_bitfield);
10235
a2fbb9ea
ET
10236 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10237
755735eb 10238 /* ring the Tx doorbell; the next-page link BD must also be
10239 * counted in nbd if this packet's BD chain contains or ends on it
10240 */
10241 if (TX_BD_POFF(bd_prod) < nbd)
10242 nbd++;
10243
10244 if (pbd)
10245 DP(NETIF_MSG_TX_QUEUED,
10246 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10247 " tcp_flags %x xsum %x seq %u hlen %u\n",
10248 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10249 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 10250 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 10251
755735eb 10252 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 10253
58f4c4cf
EG
10254 /*
10255 * Make sure that the BD data is updated before updating the producer
10256 * since FW might read the BD right after the producer is updated.
10257 * This is only applicable for weak-ordered memory model archs such
10258 * as IA-64. The following barrier is also mandatory since the FW
10259 * assumes packets must have BDs.
10260 */
10261 wmb();
10262
96fc1784
ET
10263 fp->hw_tx_prods->bds_prod =
10264 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 10265 mb(); /* FW restriction: must not reorder writing nbd and packets */
96fc1784
ET
10266 fp->hw_tx_prods->packets_prod =
10267 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 10268 DOORBELL(bp, FP_IDX(fp), 0);
a2fbb9ea
ET
10269
10270 mmiowb();
10271
755735eb 10272 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
10273 dev->trans_start = jiffies;
10274
10275 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
58f4c4cf
EG
10276 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10277 if we put Tx into XOFF state. */
10278 smp_mb();
555f6c78 10279 netif_tx_stop_queue(txq);
de832a55 10280 fp->eth_q_stats.driver_xoff++;
a2fbb9ea 10281 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 10282 netif_tx_wake_queue(txq);
a2fbb9ea
ET
10283 }
10284 fp->tx_pkt++;
10285
10286 return NETDEV_TX_OK;
10287}
10288
bb2a0f7a 10289/* called with rtnl_lock */
a2fbb9ea
ET
10290static int bnx2x_open(struct net_device *dev)
10291{
10292 struct bnx2x *bp = netdev_priv(dev);
10293
6eccabb3
EG
10294 netif_carrier_off(dev);
10295
a2fbb9ea
ET
10296 bnx2x_set_power_state(bp, PCI_D0);
10297
bb2a0f7a 10298 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
10299}
10300
bb2a0f7a 10301/* called with rtnl_lock */
a2fbb9ea
ET
10302static int bnx2x_close(struct net_device *dev)
10303{
a2fbb9ea
ET
10304 struct bnx2x *bp = netdev_priv(dev);
10305
10306 /* Unload the driver, release IRQs */
bb2a0f7a
YG
10307 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10308 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10309 if (!CHIP_REV_IS_SLOW(bp))
10310 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
10311
10312 return 0;
10313}
10314
34f80b04
EG
10315/* called with netif_tx_lock from set_multicast */
10316static void bnx2x_set_rx_mode(struct net_device *dev)
10317{
10318 struct bnx2x *bp = netdev_priv(dev);
10319 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10320 int port = BP_PORT(bp);
10321
10322 if (bp->state != BNX2X_STATE_OPEN) {
10323 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10324 return;
10325 }
10326
10327 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10328
10329 if (dev->flags & IFF_PROMISC)
10330 rx_mode = BNX2X_RX_MODE_PROMISC;
10331
10332 else if ((dev->flags & IFF_ALLMULTI) ||
10333 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10334 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10335
10336 else { /* some multicasts */
10337 if (CHIP_IS_E1(bp)) {
10338 int i, old, offset;
10339 struct dev_mc_list *mclist;
10340 struct mac_configuration_cmd *config =
10341 bnx2x_sp(bp, mcast_config);
10342
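 /* E1: each multicast address goes into a CAM entry programmed via
  * the SET_MAC ramrod; the MAC is written as three 16-bit words,
  * byte-swapped into the CAM layout below.
  */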
10343 for (i = 0, mclist = dev->mc_list;
10344 mclist && (i < dev->mc_count);
10345 i++, mclist = mclist->next) {
10346
10347 config->config_table[i].
10348 cam_entry.msb_mac_addr =
10349 swab16(*(u16 *)&mclist->dmi_addr[0]);
10350 config->config_table[i].
10351 cam_entry.middle_mac_addr =
10352 swab16(*(u16 *)&mclist->dmi_addr[2]);
10353 config->config_table[i].
10354 cam_entry.lsb_mac_addr =
10355 swab16(*(u16 *)&mclist->dmi_addr[4]);
10356 config->config_table[i].cam_entry.flags =
10357 cpu_to_le16(port);
10358 config->config_table[i].
10359 target_table_entry.flags = 0;
10360 config->config_table[i].
10361 target_table_entry.client_id = 0;
10362 config->config_table[i].
10363 target_table_entry.vlan_id = 0;
10364
10365 DP(NETIF_MSG_IFUP,
10366 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10367 config->config_table[i].
10368 cam_entry.msb_mac_addr,
10369 config->config_table[i].
10370 cam_entry.middle_mac_addr,
10371 config->config_table[i].
10372 cam_entry.lsb_mac_addr);
10373 }
8d9c5f34 10374 old = config->hdr.length;
34f80b04
EG
10375 if (old > i) {
10376 for (; i < old; i++) {
10377 if (CAM_IS_INVALID(config->
10378 config_table[i])) {
af246401 10379 /* already invalidated */
34f80b04
EG
10380 break;
10381 }
10382 /* invalidate */
10383 CAM_INVALIDATE(config->
10384 config_table[i]);
10385 }
10386 }
10387
10388 if (CHIP_REV_IS_SLOW(bp))
10389 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10390 else
10391 offset = BNX2X_MAX_MULTICAST*(1 + port);
10392
8d9c5f34 10393 config->hdr.length = i;
34f80b04 10394 config->hdr.offset = offset;
8d9c5f34 10395 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
10396 config->hdr.reserved1 = 0;
10397
10398 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10399 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10400 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10401 0);
10402 } else { /* E1H */
10403 /* Accept one or more multicasts */
10404 struct dev_mc_list *mclist;
10405 u32 mc_filter[MC_HASH_SIZE];
10406 u32 crc, bit, regidx;
10407 int i;
10408
10409 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10410
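 /* E1H: approximate filtering via a 256-bit hash. The top byte of
  * the crc32c of the MAC picks one bit: bits [7:5] select one of
  * the eight 32-bit MC_HASH registers, bits [4:0] the bit within.
  */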
10411 for (i = 0, mclist = dev->mc_list;
10412 mclist && (i < dev->mc_count);
10413 i++, mclist = mclist->next) {
10414
7c510e4b
JB
10415 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10416 mclist->dmi_addr);
34f80b04
EG
10417
10418 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10419 bit = (crc >> 24) & 0xff;
10420 regidx = bit >> 5;
10421 bit &= 0x1f;
10422 mc_filter[regidx] |= (1 << bit);
10423 }
10424
10425 for (i = 0; i < MC_HASH_SIZE; i++)
10426 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10427 mc_filter[i]);
10428 }
10429 }
10430
10431 bp->rx_mode = rx_mode;
10432 bnx2x_set_storm_rx_mode(bp);
10433}
10434
10435/* called with rtnl_lock */
a2fbb9ea
ET
10436static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10437{
10438 struct sockaddr *addr = p;
10439 struct bnx2x *bp = netdev_priv(dev);
10440
34f80b04 10441 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
10442 return -EINVAL;
10443
10444 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
10445 if (netif_running(dev)) {
10446 if (CHIP_IS_E1(bp))
3101c2bc 10447 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 10448 else
3101c2bc 10449 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 10450 }
a2fbb9ea
ET
10451
10452 return 0;
10453}
10454
c18487ee 10455/* called with rtnl_lock */
a2fbb9ea
ET
10456static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10457{
10458 struct mii_ioctl_data *data = if_mii(ifr);
10459 struct bnx2x *bp = netdev_priv(dev);
3196a88a 10460 int port = BP_PORT(bp);
a2fbb9ea
ET
10461 int err;
10462
10463 switch (cmd) {
10464 case SIOCGMIIPHY:
34f80b04 10465 data->phy_id = bp->port.phy_addr;
a2fbb9ea 10466
c14423fe 10467 /* fallthrough */
c18487ee 10468
a2fbb9ea 10469 case SIOCGMIIREG: {
c18487ee 10470 u16 mii_regval;
a2fbb9ea 10471
c18487ee
YR
10472 if (!netif_running(dev))
10473 return -EAGAIN;
a2fbb9ea 10474
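 /* reads go through the clause-45 MDIO interface; phy_mutex keeps
  * userspace MII access serialized with the link-management code
  */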
34f80b04 10475 mutex_lock(&bp->port.phy_mutex);
3196a88a 10476 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10477 DEFAULT_PHY_DEV_ADDR,
10478 (data->reg_num & 0x1f), &mii_regval);
10479 data->val_out = mii_regval;
34f80b04 10480 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10481 return err;
10482 }
10483
10484 case SIOCSMIIREG:
10485 if (!capable(CAP_NET_ADMIN))
10486 return -EPERM;
10487
c18487ee
YR
10488 if (!netif_running(dev))
10489 return -EAGAIN;
10490
34f80b04 10491 mutex_lock(&bp->port.phy_mutex);
3196a88a 10492 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10493 DEFAULT_PHY_DEV_ADDR,
10494 (data->reg_num & 0x1f), data->val_in);
34f80b04 10495 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10496 return err;
10497
10498 default:
10499 /* do nothing */
10500 break;
10501 }
10502
10503 return -EOPNOTSUPP;
10504}
10505
34f80b04 10506/* called with rtnl_lock */
a2fbb9ea
ET
10507static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10508{
10509 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10510 int rc = 0;
a2fbb9ea
ET
10511
10512 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10513 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10514 return -EINVAL;
10515
10516 /* This does not race with packet allocation
c14423fe 10517 * because the actual alloc size is
a2fbb9ea
ET
10518 * only updated as part of load
10519 */
10520 dev->mtu = new_mtu;
10521
10522 if (netif_running(dev)) {
34f80b04
EG
10523 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10524 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10525 }
34f80b04
EG
10526
10527 return rc;
a2fbb9ea
ET
10528}
10529
10530static void bnx2x_tx_timeout(struct net_device *dev)
10531{
10532 struct bnx2x *bp = netdev_priv(dev);
10533
10534#ifdef BNX2X_STOP_ON_ERROR
10535 if (!bp->panic)
10536 bnx2x_panic();
10537#endif
10538 /* This allows the netif to be shut down gracefully before resetting */
10539 schedule_work(&bp->reset_task);
10540}
10541
10542#ifdef BCM_VLAN
34f80b04 10543/* called with rtnl_lock */
a2fbb9ea
ET
10544static void bnx2x_vlan_rx_register(struct net_device *dev,
10545 struct vlan_group *vlgrp)
10546{
10547 struct bnx2x *bp = netdev_priv(dev);
10548
10549 bp->vlgrp = vlgrp;
0c6671b0
EG
10550
10551 /* Set flags according to the required capabilities */
10552 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10553
10554 if (dev->features & NETIF_F_HW_VLAN_TX)
10555 bp->flags |= HW_VLAN_TX_FLAG;
10556
10557 if (dev->features & NETIF_F_HW_VLAN_RX)
10558 bp->flags |= HW_VLAN_RX_FLAG;
10559
a2fbb9ea 10560 if (netif_running(dev))
49d66772 10561 bnx2x_set_client_config(bp);
a2fbb9ea 10562}
34f80b04 10563
a2fbb9ea
ET
10564#endif
10565
10566#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10567static void poll_bnx2x(struct net_device *dev)
10568{
10569 struct bnx2x *bp = netdev_priv(dev);
10570
10571 disable_irq(bp->pdev->irq);
10572 bnx2x_interrupt(bp->pdev->irq, dev);
10573 enable_irq(bp->pdev->irq);
10574}
10575#endif
10576
c64213cd
SH
10577static const struct net_device_ops bnx2x_netdev_ops = {
10578 .ndo_open = bnx2x_open,
10579 .ndo_stop = bnx2x_close,
10580 .ndo_start_xmit = bnx2x_start_xmit,
10581 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10582 .ndo_set_mac_address = bnx2x_change_mac_addr,
10583 .ndo_validate_addr = eth_validate_addr,
10584 .ndo_do_ioctl = bnx2x_ioctl,
10585 .ndo_change_mtu = bnx2x_change_mtu,
10586 .ndo_tx_timeout = bnx2x_tx_timeout,
10587#ifdef BCM_VLAN
10588 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10589#endif
10590#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10591 .ndo_poll_controller = poll_bnx2x,
10592#endif
10593};
10594
10595
34f80b04
EG
10596static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10597 struct net_device *dev)
a2fbb9ea
ET
10598{
10599 struct bnx2x *bp;
10600 int rc;
10601
10602 SET_NETDEV_DEV(dev, &pdev->dev);
10603 bp = netdev_priv(dev);
10604
34f80b04
EG
10605 bp->dev = dev;
10606 bp->pdev = pdev;
a2fbb9ea 10607 bp->flags = 0;
34f80b04 10608 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
10609
10610 rc = pci_enable_device(pdev);
10611 if (rc) {
10612 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10613 goto err_out;
10614 }
10615
10616 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10617 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10618 " aborting\n");
10619 rc = -ENODEV;
10620 goto err_out_disable;
10621 }
10622
10623 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10624 printk(KERN_ERR PFX "Cannot find second PCI device"
10625 " base address, aborting\n");
10626 rc = -ENODEV;
10627 goto err_out_disable;
10628 }
10629
34f80b04
EG
10630 if (atomic_read(&pdev->enable_cnt) == 1) {
10631 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10632 if (rc) {
10633 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10634 " aborting\n");
10635 goto err_out_disable;
10636 }
a2fbb9ea 10637
34f80b04
EG
10638 pci_set_master(pdev);
10639 pci_save_state(pdev);
10640 }
a2fbb9ea
ET
10641
10642 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10643 if (bp->pm_cap == 0) {
10644 printk(KERN_ERR PFX "Cannot find power management"
10645 " capability, aborting\n");
10646 rc = -EIO;
10647 goto err_out_release;
10648 }
10649
10650 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10651 if (bp->pcie_cap == 0) {
10652 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10653 " aborting\n");
10654 rc = -EIO;
10655 goto err_out_release;
10656 }
10657
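 /* prefer a 64-bit DMA mask (DAC); when it is accepted the consistent
  * (descriptor) mask must also be 64-bit, otherwise fall back to a
  * 32-bit mask or fail the probe
  */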
10658 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10659 bp->flags |= USING_DAC_FLAG;
10660 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10661 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10662 " failed, aborting\n");
10663 rc = -EIO;
10664 goto err_out_release;
10665 }
10666
10667 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10668 printk(KERN_ERR PFX "System does not support DMA,"
10669 " aborting\n");
10670 rc = -EIO;
10671 goto err_out_release;
10672 }
10673
34f80b04
EG
10674 dev->mem_start = pci_resource_start(pdev, 0);
10675 dev->base_addr = dev->mem_start;
10676 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
10677
10678 dev->irq = pdev->irq;
10679
275f165f 10680 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
10681 if (!bp->regview) {
10682 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10683 rc = -ENOMEM;
10684 goto err_out_release;
10685 }
10686
34f80b04
EG
10687 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10688 min_t(u64, BNX2X_DB_SIZE,
10689 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
10690 if (!bp->doorbells) {
10691 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10692 rc = -ENOMEM;
10693 goto err_out_unmap;
10694 }
10695
10696 bnx2x_set_power_state(bp, PCI_D0);
10697
34f80b04
EG
10698 /* clean indirect addresses */
10699 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10700 PCICFG_VENDOR_ID_OFFSET);
10701 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10702 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10703 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10704 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10705
34f80b04 10706 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10707
c64213cd 10708 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 10709 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
10710 dev->features |= NETIF_F_SG;
10711 dev->features |= NETIF_F_HW_CSUM;
10712 if (bp->flags & USING_DAC_FLAG)
10713 dev->features |= NETIF_F_HIGHDMA;
10714#ifdef BCM_VLAN
10715 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 10716 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
34f80b04
EG
10717#endif
10718 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10719 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
10720
10721 return 0;
10722
10723err_out_unmap:
10724 if (bp->regview) {
10725 iounmap(bp->regview);
10726 bp->regview = NULL;
10727 }
a2fbb9ea
ET
10728 if (bp->doorbells) {
10729 iounmap(bp->doorbells);
10730 bp->doorbells = NULL;
10731 }
10732
10733err_out_release:
34f80b04
EG
10734 if (atomic_read(&pdev->enable_cnt) == 1)
10735 pci_release_regions(pdev);
a2fbb9ea
ET
10736
10737err_out_disable:
10738 pci_disable_device(pdev);
10739 pci_set_drvdata(pdev, NULL);
10740
10741err_out:
10742 return rc;
10743}
10744
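/* Decode the negotiated PCI-E link width and speed from the link
 * control/status word in the device's configuration space; only used
 * for the probe banner printed in bnx2x_init_one() below.
 */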
25047950
ET
10745static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10746{
10747 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10748
10749 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10750 return val;
10751}
10752
10753/* return value of 1=2.5GHz 2=5GHz */
10754static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10755{
10756 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10757
10758 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10759 return val;
10760}
10761
a2fbb9ea
ET
10762static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10763 const struct pci_device_id *ent)
10764{
10765 static int version_printed;
10766 struct net_device *dev = NULL;
10767 struct bnx2x *bp;
25047950 10768 int rc;
a2fbb9ea
ET
10769
10770 if (version_printed++ == 0)
10771 printk(KERN_INFO "%s", version);
10772
10773 /* dev zeroed in init_etherdev */
555f6c78 10774 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04
EG
10775 if (!dev) {
10776 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10777 return -ENOMEM;
34f80b04 10778 }
a2fbb9ea 10779
a2fbb9ea
ET
10780 bp = netdev_priv(dev);
10781 bp->msglevel = debug;
10782
34f80b04 10783 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
10784 if (rc < 0) {
10785 free_netdev(dev);
10786 return rc;
10787 }
10788
a2fbb9ea
ET
10789 pci_set_drvdata(pdev, dev);
10790
34f80b04 10791 rc = bnx2x_init_bp(bp);
693fc0d1
EG
10792 if (rc)
10793 goto init_one_exit;
10794
10795 rc = register_netdev(dev);
34f80b04 10796 if (rc) {
693fc0d1 10797 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
10798 goto init_one_exit;
10799 }
10800
10801 bp->common.name = board_info[ent->driver_data].name;
25047950 10802 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
34f80b04
EG
10803 " IRQ %d, ", dev->name, bp->common.name,
10804 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
10805 bnx2x_get_pcie_width(bp),
10806 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10807 dev->base_addr, bp->pdev->irq);
e174961c 10808 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
a2fbb9ea 10809 return 0;
34f80b04
EG
10810
10811init_one_exit:
10812 if (bp->regview)
10813 iounmap(bp->regview);
10814
10815 if (bp->doorbells)
10816 iounmap(bp->doorbells);
10817
10818 free_netdev(dev);
10819
10820 if (atomic_read(&pdev->enable_cnt) == 1)
10821 pci_release_regions(pdev);
10822
10823 pci_disable_device(pdev);
10824 pci_set_drvdata(pdev, NULL);
10825
10826 return rc;
a2fbb9ea
ET
10827}
10828
10829static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10830{
10831 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10832 struct bnx2x *bp;
10833
10834 if (!dev) {
228241eb
ET
10835 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10836 return;
10837 }
228241eb 10838 bp = netdev_priv(dev);
a2fbb9ea 10839
a2fbb9ea
ET
10840 unregister_netdev(dev);
10841
10842 if (bp->regview)
10843 iounmap(bp->regview);
10844
10845 if (bp->doorbells)
10846 iounmap(bp->doorbells);
10847
10848 free_netdev(dev);
34f80b04
EG
10849
10850 if (atomic_read(&pdev->enable_cnt) == 1)
10851 pci_release_regions(pdev);
10852
a2fbb9ea
ET
10853 pci_disable_device(pdev);
10854 pci_set_drvdata(pdev, NULL);
10855}
10856
10857static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10858{
10859 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10860 struct bnx2x *bp;
10861
34f80b04
EG
10862 if (!dev) {
10863 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10864 return -ENODEV;
10865 }
10866 bp = netdev_priv(dev);
a2fbb9ea 10867
34f80b04 10868 rtnl_lock();
a2fbb9ea 10869
34f80b04 10870 pci_save_state(pdev);
228241eb 10871
34f80b04
EG
10872 if (!netif_running(dev)) {
10873 rtnl_unlock();
10874 return 0;
10875 }
a2fbb9ea
ET
10876
10877 netif_device_detach(dev);
a2fbb9ea 10878
da5a662a 10879 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 10880
a2fbb9ea 10881 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 10882
34f80b04
EG
10883 rtnl_unlock();
10884
a2fbb9ea
ET
10885 return 0;
10886}
10887
10888static int bnx2x_resume(struct pci_dev *pdev)
10889{
10890 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 10891 struct bnx2x *bp;
a2fbb9ea
ET
10892 int rc;
10893
228241eb
ET
10894 if (!dev) {
10895 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10896 return -ENODEV;
10897 }
228241eb 10898 bp = netdev_priv(dev);
a2fbb9ea 10899
34f80b04
EG
10900 rtnl_lock();
10901
228241eb 10902 pci_restore_state(pdev);
34f80b04
EG
10903
10904 if (!netif_running(dev)) {
10905 rtnl_unlock();
10906 return 0;
10907 }
10908
a2fbb9ea
ET
10909 bnx2x_set_power_state(bp, PCI_D0);
10910 netif_device_attach(dev);
10911
da5a662a 10912 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 10913
34f80b04
EG
10914 rtnl_unlock();
10915
10916 return rc;
a2fbb9ea
ET
10917}
10918
f8ef6e44
YG
10919static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10920{
10921 int i;
10922
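 /* Like bnx2x_nic_unload() but without ramrods or the MCP handshake:
  * after a PCI error the chip may be unreachable, so only driver
  * resources are reclaimed here.
  */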
10923 bp->state = BNX2X_STATE_ERROR;
10924
10925 bp->rx_mode = BNX2X_RX_MODE_NONE;
10926
10927 bnx2x_netif_stop(bp, 0);
10928
10929 del_timer_sync(&bp->timer);
10930 bp->stats_state = STATS_STATE_DISABLED;
10931 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10932
10933 /* Release IRQs */
10934 bnx2x_free_irq(bp);
10935
10936 if (CHIP_IS_E1(bp)) {
10937 struct mac_configuration_cmd *config =
10938 bnx2x_sp(bp, mcast_config);
10939
8d9c5f34 10940 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
10941 CAM_INVALIDATE(config->config_table[i]);
10942 }
10943
10944 /* Free SKBs, SGEs, TPA pool and driver internals */
10945 bnx2x_free_skbs(bp);
555f6c78 10946 for_each_rx_queue(bp, i)
f8ef6e44 10947 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 10948 for_each_rx_queue(bp, i)
7cde1c8b 10949 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
10950 bnx2x_free_mem(bp);
10951
10952 bp->state = BNX2X_STATE_CLOSED;
10953
10954 netif_carrier_off(bp->dev);
10955
10956 return 0;
10957}
10958
10959static void bnx2x_eeh_recover(struct bnx2x *bp)
10960{
10961 u32 val;
10962
10963 mutex_init(&bp->port.phy_mutex);
10964
10965 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10966 bp->link_params.shmem_base = bp->common.shmem_base;
10967 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10968
10969 if (!bp->common.shmem_base ||
10970 (bp->common.shmem_base < 0xA0000) ||
10971 (bp->common.shmem_base >= 0xC0000)) {
10972 BNX2X_DEV_INFO("MCP not active\n");
10973 bp->flags |= NO_MCP_FLAG;
10974 return;
10975 }
10976
10977 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10978 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10979 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10980 BNX2X_ERR("BAD MCP validity signature\n");
10981
10982 if (!BP_NOMCP(bp)) {
10983 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10984 & DRV_MSG_SEQ_NUMBER_MASK);
10985 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10986 }
10987}
10988
493adb1f
WX
10989/**
10990 * bnx2x_io_error_detected - called when PCI error is detected
10991 * @pdev: Pointer to PCI device
10992 * @state: The current pci connection state
10993 *
10994 * This function is called after a PCI bus error affecting
10995 * this device has been detected.
10996 */
10997static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10998 pci_channel_state_t state)
10999{
11000 struct net_device *dev = pci_get_drvdata(pdev);
11001 struct bnx2x *bp = netdev_priv(dev);
11002
11003 rtnl_lock();
11004
11005 netif_device_detach(dev);
11006
11007 if (netif_running(dev))
f8ef6e44 11008 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
11009
11010 pci_disable_device(pdev);
11011
11012 rtnl_unlock();
11013
11014 /* Request a slot reset */
11015 return PCI_ERS_RESULT_NEED_RESET;
11016}
11017
11018/**
11019 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11020 * @pdev: Pointer to PCI device
11021 *
11022 * Restart the card from scratch, as if from a cold boot.
11023 */
11024static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11025{
11026 struct net_device *dev = pci_get_drvdata(pdev);
11027 struct bnx2x *bp = netdev_priv(dev);
11028
11029 rtnl_lock();
11030
11031 if (pci_enable_device(pdev)) {
11032 dev_err(&pdev->dev,
11033 "Cannot re-enable PCI device after reset\n");
11034 rtnl_unlock();
11035 return PCI_ERS_RESULT_DISCONNECT;
11036 }
11037
11038 pci_set_master(pdev);
11039 pci_restore_state(pdev);
11040
11041 if (netif_running(dev))
11042 bnx2x_set_power_state(bp, PCI_D0);
11043
11044 rtnl_unlock();
11045
11046 return PCI_ERS_RESULT_RECOVERED;
11047}
11048
11049/**
11050 * bnx2x_io_resume - called when traffic can start flowing again
11051 * @pdev: Pointer to PCI device
11052 *
11053 * This callback is called when the error recovery driver tells us that
11054 * it's OK to resume normal operation.
11055 */
11056static void bnx2x_io_resume(struct pci_dev *pdev)
11057{
11058 struct net_device *dev = pci_get_drvdata(pdev);
11059 struct bnx2x *bp = netdev_priv(dev);
11060
11061 rtnl_lock();
11062
f8ef6e44
YG
11063 bnx2x_eeh_recover(bp);
11064
493adb1f 11065 if (netif_running(dev))
f8ef6e44 11066 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
11067
11068 netif_device_attach(dev);
11069
11070 rtnl_unlock();
11071}
11072
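/* PCI error recovery flow: the AER/EEH core calls error_detected()
 * first, slot_reset() once the link has been reset, and resume()
 * when traffic may flow again.
 */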
11073static struct pci_error_handlers bnx2x_err_handler = {
11074 .error_detected = bnx2x_io_error_detected,
11075 .slot_reset = bnx2x_io_slot_reset,
11076 .resume = bnx2x_io_resume,
11077};
11078
a2fbb9ea 11079static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
11080 .name = DRV_MODULE_NAME,
11081 .id_table = bnx2x_pci_tbl,
11082 .probe = bnx2x_init_one,
11083 .remove = __devexit_p(bnx2x_remove_one),
11084 .suspend = bnx2x_suspend,
11085 .resume = bnx2x_resume,
11086 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
11087};
11088
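/* Module init: a single-threaded workqueue so the driver's slow-path
 * work items are serialized with respect to each other.
 */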
11089static int __init bnx2x_init(void)
11090{
1cf167f2
EG
11091 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11092 if (bnx2x_wq == NULL) {
11093 printk(KERN_ERR PFX "Cannot create workqueue\n");
11094 return -ENOMEM;
11095 }
11096
a2fbb9ea
ET
11097 return pci_register_driver(&bnx2x_pci_driver);
11098}
11099
11100static void __exit bnx2x_cleanup(void)
11101{
11102 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
11103
11104 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
11105}
11106
11107module_init(bnx2x_init);
11108module_exit(bnx2x_cleanup);
11109