/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.23"
#define DRV_MODULE_RELDATE	"2008/11/03"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
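
/* Editor's note (rationale inferred from the code, not from a datasheet):
 * the two helpers above use the PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA pair in
 * PCI config space as a sliding window into the chip's internal (GRC)
 * register space, which is why they work before any BAR is ioremapped.
 * Restoring the window to PCICFG_VENDOR_ID_OFFSET afterwards parks the
 * aperture on a harmless location.
 */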

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
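
/* Editor's note (sketch of the flow above, inferred from this file): the
 * DMAE engine copies the DMA-mapped buffer into GRC register space and
 * signals completion by writing DMAE_COMP_VAL into the wb_comp slowpath
 * word, which the polling loop above watches.  Before the engine is ready
 * (bp->dmae_ready clear) the same writes fall back to the slower indirect
 * config-space path.  A typical caller is the REG_WR_DMAE() wrapper used
 * by bnx2x_wb_wr() below.
 */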

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
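
/* Editor's note (reading inferred from the code; the scratchpad layout is
 * firmware-defined): the 0x08000000 adjustments above appear to translate
 * the MCP's own view of its scratchpad into offsets relative to
 * MCP_REG_MCPR_SCRATCH, and the dump is printed in two passes - from the
 * mark to the end of the buffer, then from the buffer start back to the
 * mark - because the firmware treats the region as a circular text log.
 */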

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
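
/* Editor's note: the IGU ack above is a single 32-bit posted write - the
 * igu_ack_register fields (status block index, SB id, storm id, update
 * flag and interrupt mode) are packed into one u32 and written to the HC
 * command register, which acknowledges the status block and enables or
 * disables the interrupt in the same shot.
 */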

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
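
/* Editor's note - a sketch of the descriptor walk above, for a typical
 * TSO packet (layout illustrative, inferred from the flag checks):
 *
 *	first_bd -> [start bd][parse bd][TSO split hdr bd][frag bd]...
 *
 * tx_bd->nbd counts every bd of the packet; the code unmaps the start bd
 * directly, steps over the parse and split-header bds because they carry
 * no DMA mapping, and unmaps the remaining frag bds in the while loop.
 */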

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
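
/* Editor's note - worked example of the math above (illustrative numbers):
 * with prod = 100, cons = 90 and NUM_TX_RINGS = 16, used = 10 + 16 = 26,
 * so the "next-page" bds that can never carry packets are always counted
 * as consumed and tx_avail stays conservative.  SUB_S16() keeps the
 * subtraction correct when prod has wrapped around the 16-bit index space.
 */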

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the last two indices in each page: these correspond to
	   the "next page" element, hence will never be indicated by the
	   FW and should be removed from the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
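
/* Editor's note (behaviour inferred from the code above): sge_mask keeps
 * one bit per SGE ring entry, grouped into u64 elements.  Bits start out
 * set and are cleared as the FW reports pages consumed; once a whole u64
 * element reaches zero, bnx2x_update_sge_prod() re-arms it to all ones
 * and advances rx_sge_prod by RX_SGE_MASK_ELEM_SZ, so the producer only
 * moves in aligned 64-entry strides.  The two "next page" slots at the
 * end of each ring page are pre-cleared so they never keep an element
 * from reaching zero.
 */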

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
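
/* Editor's note - the TPA bin lifecycle as implemented above and in
 * bnx2x_tpa_stop() below: on TPA_START the driver parks the partially
 * filled skb in tpa_pool[queue] and maps a fresh skb into the ring slot;
 * on TPA_END the pooled skb gets its IP checksum fixed, the SGE pages are
 * attached as frags via bnx2x_fill_frag_skb(), and the skb goes to the
 * stack, with the bin returning to BNX2X_TPA_STOP.
 */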
1199
1200static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1201 struct sk_buff *skb,
1202 struct eth_fast_path_rx_cqe *fp_cqe,
1203 u16 cqe_idx)
1204{
1205 struct sw_rx_page *rx_pg, old_rx_pg;
7a9b2557
VZ
1206 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1207 u32 i, frag_len, frag_size, pages;
1208 int err;
1209 int j;
1210
1211 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
4f40f2cb 1212 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
1213
1214 /* This is needed in order to enable forwarding support */
1215 if (frag_size)
4f40f2cb 1216 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
7a9b2557
VZ
1217 max(frag_size, (u32)len_on_bd));
1218
1219#ifdef BNX2X_STOP_ON_ERROR
4f40f2cb
EG
1220 if (pages >
1221 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
7a9b2557
VZ
1222 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1223 pages, cqe_idx);
1224 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1225 fp_cqe->pkt_len, len_on_bd);
1226 bnx2x_panic();
1227 return -EINVAL;
1228 }
1229#endif
1230
1231 /* Run through the SGL and compose the fragmented skb */
1232 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1233 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1234
1235 /* FW gives the indices of the SGE as if the ring is an array
1236 (meaning that "next" element will consume 2 indices) */
4f40f2cb 1237 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
7a9b2557 1238 rx_pg = &fp->rx_page_ring[sge_idx];
7a9b2557
VZ
1239 old_rx_pg = *rx_pg;
1240
1241 /* If we fail to allocate a substitute page, we simply stop
1242 where we are and drop the whole packet */
1243 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1244 if (unlikely(err)) {
66e855f3 1245 bp->eth_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1246 return err;
1247 }
1248
1249 /* Unmap the page as we r going to pass it to the stack */
1250 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
4f40f2cb 1251 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1252
1253 /* Add one frag and update the appropriate fields in the skb */
1254 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1255
1256 skb->data_len += frag_len;
1257 skb->truesize += frag_len;
1258 skb->len += frag_len;
1259
1260 frag_size -= frag_len;
1261 }
1262
1263 return 0;
1264}
1265
1266static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1267 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1268 u16 cqe_idx)
1269{
1270 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1271 struct sk_buff *skb = rx_buf->skb;
1272 /* alloc new skb */
1273 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1274
1275 /* Unmap skb in the pool anyway, as we are going to change
1276 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1277 fails. */
1278 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
437cf2f1 1279 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557 1280
7a9b2557 1281 if (likely(new_skb)) {
66e855f3
YG
1282 /* fix ip xsum and give it to the stack */
1283 /* (no need to map the new skb) */
0c6671b0
EG
1284#ifdef BCM_VLAN
1285 int is_vlan_cqe =
1286 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1287 PARSING_FLAGS_VLAN);
1288 int is_not_hwaccel_vlan_cqe =
1289 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1290#endif
7a9b2557
VZ
1291
1292 prefetch(skb);
1293 prefetch(((char *)(skb)) + 128);
1294
7a9b2557
VZ
1295#ifdef BNX2X_STOP_ON_ERROR
1296 if (pad + len > bp->rx_buf_size) {
1297 BNX2X_ERR("skb_put is about to fail... "
1298 "pad %d len %d rx_buf_size %d\n",
1299 pad, len, bp->rx_buf_size);
1300 bnx2x_panic();
1301 return;
1302 }
1303#endif
1304
1305 skb_reserve(skb, pad);
1306 skb_put(skb, len);
1307
1308 skb->protocol = eth_type_trans(skb, bp->dev);
1309 skb->ip_summed = CHECKSUM_UNNECESSARY;
1310
1311 {
1312 struct iphdr *iph;
1313
1314 iph = (struct iphdr *)skb->data;
0c6671b0
EG
1315#ifdef BCM_VLAN
1316 /* If there is no Rx VLAN offloading -
1317 take VLAN tag into an account */
1318 if (unlikely(is_not_hwaccel_vlan_cqe))
1319 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1320#endif
7a9b2557
VZ
1321 iph->check = 0;
1322 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1323 }
1324
1325 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1326 &cqe->fast_path_cqe, cqe_idx)) {
1327#ifdef BCM_VLAN
0c6671b0
EG
1328 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1329 (!is_not_hwaccel_vlan_cqe))
7a9b2557
VZ
1330 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1331 le16_to_cpu(cqe->fast_path_cqe.
1332 vlan_tag));
1333 else
1334#endif
1335 netif_receive_skb(skb);
1336 } else {
1337 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1338 " - dropping packet!\n");
1339 dev_kfree_skb(skb);
1340 }
1341
7a9b2557
VZ
1342
1343 /* put new skb in bin */
1344 fp->tpa_pool[queue].skb = new_skb;
1345
1346 } else {
66e855f3 1347 /* else drop the packet and keep the buffer in the bin */
7a9b2557
VZ
1348 DP(NETIF_MSG_RX_STATUS,
1349 "Failed to allocate new skb - dropping packet!\n");
66e855f3 1350 bp->eth_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1351 }
1352
1353 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1354}
1355
1356static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1357 struct bnx2x_fastpath *fp,
1358 u16 bd_prod, u16 rx_comp_prod,
1359 u16 rx_sge_prod)
1360{
1361 struct tstorm_eth_rx_producers rx_prods = {0};
1362 int i;
1363
1364 /* Update producers */
1365 rx_prods.bd_prod = bd_prod;
1366 rx_prods.cqe_prod = rx_comp_prod;
1367 rx_prods.sge_prod = rx_sge_prod;
1368
58f4c4cf
EG
1369 /*
1370 * Make sure that the BD and SGE data is updated before updating the
1371 * producers since FW might read the BD/SGE right after the producer
1372 * is updated.
1373 * This is only applicable for weak-ordered memory model archs such
1374 * as IA-64. The following barrier is also mandatory since FW will
1375 * assumes BDs must have buffers.
1376 */
1377 wmb();
1378
7a9b2557
VZ
1379 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1380 REG_WR(bp, BAR_TSTRORM_INTMEM +
1381 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1382 ((u32 *)&rx_prods)[i]);
1383
58f4c4cf
EG
1384 mmiowb(); /* keep prod updates ordered */
1385
7a9b2557
VZ
1386 DP(NETIF_MSG_RX_STATUS,
1387 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1388 bd_prod, rx_comp_prod, rx_sge_prod);
1389}
1390
a2fbb9ea
ET
1391static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1392{
1393 struct bnx2x *bp = fp->bp;
34f80b04 1394 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1395 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1396 int rx_pkt = 0;
1397
1398#ifdef BNX2X_STOP_ON_ERROR
1399 if (unlikely(bp->panic))
1400 return 0;
1401#endif
1402
34f80b04
EG
1403 /* CQ "next element" is of the size of the regular element,
1404 that's why it's ok here */
a2fbb9ea
ET
1405 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1406 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1407 hw_comp_cons++;
1408
1409 bd_cons = fp->rx_bd_cons;
1410 bd_prod = fp->rx_bd_prod;
34f80b04 1411 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1412 sw_comp_cons = fp->rx_comp_cons;
1413 sw_comp_prod = fp->rx_comp_prod;
1414
1415 /* Memory barrier necessary as speculative reads of the rx
1416 * buffer can be ahead of the index in the status block
1417 */
1418 rmb();
1419
1420 DP(NETIF_MSG_RX_STATUS,
1421 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
34f80b04 1422 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1423
1424 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1425 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1426 struct sk_buff *skb;
1427 union eth_rx_cqe *cqe;
34f80b04
EG
1428 u8 cqe_fp_flags;
1429 u16 len, pad;
a2fbb9ea
ET
1430
1431 comp_ring_cons = RCQ_BD(sw_comp_cons);
1432 bd_prod = RX_BD(bd_prod);
1433 bd_cons = RX_BD(bd_cons);
1434
1435 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1436 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1437
a2fbb9ea 1438 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1439 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1440 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1441 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1442 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1443 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1444
1445 /* is this a slowpath msg? */
34f80b04 1446 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1447 bnx2x_sp_event(fp, cqe);
1448 goto next_cqe;
1449
1450 /* this is an rx packet */
1451 } else {
1452 rx_buf = &fp->rx_buf_ring[bd_cons];
1453 skb = rx_buf->skb;
a2fbb9ea
ET
1454 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1455 pad = cqe->fast_path_cqe.placement_offset;
1456
7a9b2557
VZ
1457 /* If CQE is marked both TPA_START and TPA_END
1458 it is a non-TPA CQE */
1459 if ((!fp->disable_tpa) &&
1460 (TPA_TYPE(cqe_fp_flags) !=
1461 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1462 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1463
1464 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1465 DP(NETIF_MSG_RX_STATUS,
1466 "calling tpa_start on queue %d\n",
1467 queue);
1468
1469 bnx2x_tpa_start(fp, queue, skb,
1470 bd_cons, bd_prod);
1471 goto next_rx;
1472 }
1473
1474 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1475 DP(NETIF_MSG_RX_STATUS,
1476 "calling tpa_stop on queue %d\n",
1477 queue);
1478
1479 if (!BNX2X_RX_SUM_FIX(cqe))
1480 BNX2X_ERR("STOP on none TCP "
1481 "data\n");
1482
1483 /* This is a size of the linear data
1484 on this skb */
1485 len = le16_to_cpu(cqe->fast_path_cqe.
1486 len_on_bd);
1487 bnx2x_tpa_stop(bp, fp, queue, pad,
1488 len, cqe, comp_ring_cons);
1489#ifdef BNX2X_STOP_ON_ERROR
1490 if (bp->panic)
1491 return -EINVAL;
1492#endif
1493
1494 bnx2x_update_sge_prod(fp,
1495 &cqe->fast_path_cqe);
1496 goto next_cqe;
1497 }
1498 }
1499
a2fbb9ea
ET
1500 pci_dma_sync_single_for_device(bp->pdev,
1501 pci_unmap_addr(rx_buf, mapping),
1502 pad + RX_COPY_THRESH,
1503 PCI_DMA_FROMDEVICE);
1504 prefetch(skb);
1505 prefetch(((char *)(skb)) + 128);
1506
1507 /* is this an error packet? */
34f80b04 1508 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1509 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1510 "ERROR flags %x rx packet %u\n",
1511 cqe_fp_flags, sw_comp_cons);
66e855f3 1512 bp->eth_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1513 goto reuse_rx;
1514 }
1515
1516 /* Since we don't have a jumbo ring
1517 * copy small packets if mtu > 1500
1518 */
1519 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1520 (len <= RX_COPY_THRESH)) {
1521 struct sk_buff *new_skb;
1522
1523 new_skb = netdev_alloc_skb(bp->dev,
1524 len + pad);
1525 if (new_skb == NULL) {
1526 DP(NETIF_MSG_RX_ERR,
34f80b04 1527 "ERROR packet dropped "
a2fbb9ea 1528 "because of alloc failure\n");
66e855f3 1529 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1530 goto reuse_rx;
1531 }
1532
1533 /* aligned copy */
1534 skb_copy_from_linear_data_offset(skb, pad,
1535 new_skb->data + pad, len);
1536 skb_reserve(new_skb, pad);
1537 skb_put(new_skb, len);
1538
1539 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1540
1541 skb = new_skb;
1542
1543 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1544 pci_unmap_single(bp->pdev,
1545 pci_unmap_addr(rx_buf, mapping),
437cf2f1 1546 bp->rx_buf_size,
a2fbb9ea
ET
1547 PCI_DMA_FROMDEVICE);
1548 skb_reserve(skb, pad);
1549 skb_put(skb, len);
1550
1551 } else {
1552 DP(NETIF_MSG_RX_ERR,
34f80b04 1553 "ERROR packet dropped because "
a2fbb9ea 1554 "of alloc failure\n");
66e855f3 1555 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1556reuse_rx:
1557 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1558 goto next_rx;
1559 }
1560
1561 skb->protocol = eth_type_trans(skb, bp->dev);
1562
1563 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1564 if (bp->rx_csum) {
1adcd8be
EG
1565 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1566 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3
YG
1567 else
1568 bp->eth_stats.hw_csum_err++;
1569 }
a2fbb9ea
ET
1570 }
1571
1572#ifdef BCM_VLAN
0c6671b0 1573 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1574 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1575 PARSING_FLAGS_VLAN))
a2fbb9ea
ET
1576 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1577 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1578 else
1579#endif
34f80b04 1580 netif_receive_skb(skb);
a2fbb9ea 1581
a2fbb9ea
ET
1582
1583next_rx:
1584 rx_buf->skb = NULL;
1585
1586 bd_cons = NEXT_RX_IDX(bd_cons);
1587 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1588 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1589 rx_pkt++;
a2fbb9ea
ET
1590next_cqe:
1591 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1592 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1593
34f80b04 1594 if (rx_pkt == budget)
a2fbb9ea
ET
1595 break;
1596 } /* while */
1597
1598 fp->rx_bd_cons = bd_cons;
34f80b04 1599 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1600 fp->rx_comp_cons = sw_comp_cons;
1601 fp->rx_comp_prod = sw_comp_prod;
1602
7a9b2557
VZ
1603 /* Update producers */
1604 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1605 fp->rx_sge_prod);
a2fbb9ea
ET
1606
1607 fp->rx_pkt += rx_pkt;
1608 fp->rx_calls++;
1609
1610 return rx_pkt;
1611}
1612
1613static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1614{
1615 struct bnx2x_fastpath *fp = fp_cookie;
1616 struct bnx2x *bp = fp->bp;
34f80b04 1617 int index = FP_IDX(fp);
a2fbb9ea 1618
da5a662a
VZ
1619 /* Return here if interrupt is disabled */
1620 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1621 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1622 return IRQ_HANDLED;
1623 }
1624
34f80b04
EG
1625 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1626 index, FP_SB_ID(fp));
1627 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1628
1629#ifdef BNX2X_STOP_ON_ERROR
1630 if (unlikely(bp->panic))
1631 return IRQ_HANDLED;
1632#endif
1633
1634 prefetch(fp->rx_cons_sb);
1635 prefetch(fp->tx_cons_sb);
1636 prefetch(&fp->status_blk->c_status_block.status_block_index);
1637 prefetch(&fp->status_blk->u_status_block.status_block_index);
1638
908a7a16 1639 netif_rx_schedule(&bnx2x_fp(bp, index, napi));
34f80b04 1640
a2fbb9ea
ET
1641 return IRQ_HANDLED;
1642}
1643
1644static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1645{
1646 struct net_device *dev = dev_instance;
1647 struct bnx2x *bp = netdev_priv(dev);
1648 u16 status = bnx2x_ack_int(bp);
34f80b04 1649 u16 mask;
a2fbb9ea 1650
34f80b04 1651 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1652 if (unlikely(status == 0)) {
1653 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1654 return IRQ_NONE;
1655 }
34f80b04 1656 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
a2fbb9ea 1657
34f80b04 1658 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1659 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1660 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1661 return IRQ_HANDLED;
1662 }
1663
3196a88a
EG
1664#ifdef BNX2X_STOP_ON_ERROR
1665 if (unlikely(bp->panic))
1666 return IRQ_HANDLED;
1667#endif
1668
34f80b04
EG
1669 mask = 0x2 << bp->fp[0].sb_id;
1670 if (status & mask) {
a2fbb9ea
ET
1671 struct bnx2x_fastpath *fp = &bp->fp[0];
1672
1673 prefetch(fp->rx_cons_sb);
1674 prefetch(fp->tx_cons_sb);
1675 prefetch(&fp->status_blk->c_status_block.status_block_index);
1676 prefetch(&fp->status_blk->u_status_block.status_block_index);
1677
908a7a16 1678 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
a2fbb9ea 1679
34f80b04 1680 status &= ~mask;
a2fbb9ea
ET
1681 }
1682
a2fbb9ea 1683
34f80b04 1684 if (unlikely(status & 0x1)) {
1cf167f2 1685 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1686
1687 status &= ~0x1;
1688 if (!status)
1689 return IRQ_HANDLED;
1690 }
1691
34f80b04
EG
1692 if (status)
1693 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1694 status);
a2fbb9ea 1695
c18487ee 1696 return IRQ_HANDLED;
a2fbb9ea
ET
1697}
1698
c18487ee 1699/* end of fast path */
a2fbb9ea 1700
bb2a0f7a 1701static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1702
c18487ee
YR
1703/* Link */
1704
1705/*
1706 * General service functions
1707 */
a2fbb9ea 1708
4a37fb66 1709static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1710{
1711 u32 lock_status;
1712 u32 resource_bit = (1 << resource);
4a37fb66
YG
1713 int func = BP_FUNC(bp);
1714 u32 hw_lock_control_reg;
c18487ee 1715 int cnt;
a2fbb9ea 1716
c18487ee
YR
1717 /* Validating that the resource is within range */
1718 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1719 DP(NETIF_MSG_HW,
1720 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1721 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1722 return -EINVAL;
1723 }
a2fbb9ea 1724
4a37fb66
YG
1725 if (func <= 5) {
1726 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1727 } else {
1728 hw_lock_control_reg =
1729 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1730 }
1731
c18487ee 1732 /* Validating that the resource is not already taken */
4a37fb66 1733 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1734 if (lock_status & resource_bit) {
1735 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1736 lock_status, resource_bit);
1737 return -EEXIST;
1738 }
a2fbb9ea 1739
46230476
EG
1740 /* Try for 5 second every 5ms */
1741 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1742 /* Try to acquire the lock */
4a37fb66
YG
1743 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1744 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1745 if (lock_status & resource_bit)
1746 return 0;
a2fbb9ea 1747
c18487ee 1748 msleep(5);
a2fbb9ea 1749 }
c18487ee
YR
1750 DP(NETIF_MSG_HW, "Timeout\n");
1751 return -EAGAIN;
1752}
a2fbb9ea 1753
4a37fb66 1754static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1755{
1756 u32 lock_status;
1757 u32 resource_bit = (1 << resource);
4a37fb66
YG
1758 int func = BP_FUNC(bp);
1759 u32 hw_lock_control_reg;
a2fbb9ea 1760
c18487ee
YR
1761 /* Validating that the resource is within range */
1762 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1763 DP(NETIF_MSG_HW,
1764 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1765 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1766 return -EINVAL;
1767 }
1768
4a37fb66
YG
1769 if (func <= 5) {
1770 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1771 } else {
1772 hw_lock_control_reg =
1773 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1774 }
1775
c18487ee 1776 /* Validating that the resource is currently taken */
4a37fb66 1777 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1778 if (!(lock_status & resource_bit)) {
1779 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1780 lock_status, resource_bit);
1781 return -EFAULT;
a2fbb9ea
ET
1782 }
1783
4a37fb66 1784 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1785 return 0;
1786}
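/* Usage sketch for the two lock helpers above (illustrative only, not
   part of the driver): callers bracket register access with an
   acquire/release pair on the same resource ID and must honour the
   error codes (-EINVAL, -EEXIST, or -EAGAIN on timeout). */
static int bnx2x_example_gpio_access(struct bnx2x *bp)
{
	int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	if (rc)
		return rc;
	/* ... safely touch MISC_REG_GPIO here ... */
	return bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
}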
1787
1788/* HW Lock for shared dual port PHYs */
4a37fb66 1789static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee
YR
1790{
1791 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1792
34f80b04 1793 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1794
c18487ee
YR
1795 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1796 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1797 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
c18487ee 1798}
a2fbb9ea 1799
4a37fb66 1800static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee
YR
1801{
1802 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1803
c18487ee
YR
1804 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1805 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1806 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1807
34f80b04 1808 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1809}
a2fbb9ea 1810
17de50b7 1811int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1812{
1813 /* The GPIO should be swapped if swap register is set and active */
1814 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1815 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1816 int gpio_shift = gpio_num +
1817 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1818 u32 gpio_mask = (1 << gpio_shift);
1819 u32 gpio_reg;
a2fbb9ea 1820
c18487ee
YR
1821 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1822 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1823 return -EINVAL;
1824 }
a2fbb9ea 1825
4a37fb66 1826 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
 1827 /* read GPIO and mask off everything except the float bits */
1828 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1829
c18487ee
YR
1830 switch (mode) {
1831 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1832 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1833 gpio_num, gpio_shift);
1834 /* clear FLOAT and set CLR */
1835 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1836 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1837 break;
a2fbb9ea 1838
c18487ee
YR
1839 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1840 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1841 gpio_num, gpio_shift);
1842 /* clear FLOAT and set SET */
1843 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1844 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1845 break;
a2fbb9ea 1846
17de50b7 1847 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1848 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1849 gpio_num, gpio_shift);
1850 /* set FLOAT */
1851 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1852 break;
a2fbb9ea 1853
c18487ee
YR
1854 default:
1855 break;
a2fbb9ea
ET
1856 }
1857
c18487ee 1858 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1859 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1860
c18487ee 1861 return 0;
a2fbb9ea
ET
1862}
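/* Worked example of the shift arithmetic in bnx2x_set_gpio() above,
   assuming MISC_REGISTERS_GPIO_PORT_SHIFT is 4 (the value is defined
   elsewhere): driving GPIO 2 low on port 1 with no strap override
   gives gpio_shift = 2 + 4 = 6 and gpio_mask = 0x40, so the write
   clears bit 6 in the FLOAT field and sets bit 6 in the CLR field. */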
1863
c18487ee 1864static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1865{
c18487ee
YR
1866 u32 spio_mask = (1 << spio_num);
1867 u32 spio_reg;
a2fbb9ea 1868
c18487ee
YR
1869 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1870 (spio_num > MISC_REGISTERS_SPIO_7)) {
1871 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1872 return -EINVAL;
a2fbb9ea
ET
1873 }
1874
4a37fb66 1875 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
 1876 /* read SPIO and mask off everything except the float bits */
1877 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1878
c18487ee 1879 switch (mode) {
6378c025 1880 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1881 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1882 /* clear FLOAT and set CLR */
1883 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1884 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1885 break;
a2fbb9ea 1886
6378c025 1887 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1888 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1889 /* clear FLOAT and set SET */
1890 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1891 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1892 break;
a2fbb9ea 1893
c18487ee
YR
1894 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1895 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1896 /* set FLOAT */
1897 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1898 break;
a2fbb9ea 1899
c18487ee
YR
1900 default:
1901 break;
a2fbb9ea
ET
1902 }
1903
c18487ee 1904 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1905 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1906
a2fbb9ea
ET
1907 return 0;
1908}
1909
c18487ee 1910static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1911{
ad33ea3a
EG
1912 switch (bp->link_vars.ieee_fc &
1913 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1914 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1915 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1916 ADVERTISED_Pause);
1917 break;
1918 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1919 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1920 ADVERTISED_Pause);
1921 break;
1922 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1923 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
1924 break;
1925 default:
34f80b04 1926 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1927 ADVERTISED_Pause);
1928 break;
1929 }
1930}
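/* The switch above follows the IEEE 802.3 pause advertisement
   encoding:

	negotiated ieee_fc	advertising bits set
	------------------	----------------------------
	PAUSE_NONE		neither Pause nor Asym_Pause
	PAUSE_BOTH		Pause | Asym_Pause
	PAUSE_ASYMMETRIC	Asym_Pause only
*/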
f1410647 1931
c18487ee
YR
1932static void bnx2x_link_report(struct bnx2x *bp)
1933{
1934 if (bp->link_vars.link_up) {
1935 if (bp->state == BNX2X_STATE_OPEN)
1936 netif_carrier_on(bp->dev);
1937 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1938
c18487ee 1939 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1940
c18487ee
YR
1941 if (bp->link_vars.duplex == DUPLEX_FULL)
1942 printk("full duplex");
1943 else
1944 printk("half duplex");
f1410647 1945
c0700f90
DM
1946 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1947 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 1948 printk(", receive ");
c0700f90 1949 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
c18487ee
YR
1950 printk("& transmit ");
1951 } else {
1952 printk(", transmit ");
1953 }
1954 printk("flow control ON");
1955 }
1956 printk("\n");
f1410647 1957
c18487ee
YR
1958 } else { /* link_down */
1959 netif_carrier_off(bp->dev);
1960 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1961 }
c18487ee
YR
1962}
1963
1964static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1965{
19680c48
EG
1966 if (!BP_NOMCP(bp)) {
1967 u8 rc;
a2fbb9ea 1968
19680c48 1969 /* Initialize link parameters structure variables */
8c99e7b0
YR
1970 /* It is recommended to turn off RX FC for jumbo frames
1971 for better performance */
1972 if (IS_E1HMF(bp))
c0700f90 1973 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 1974 else if (bp->dev->mtu > 5000)
c0700f90 1975 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1976 else
c0700f90 1977 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1978
4a37fb66 1979 bnx2x_acquire_phy_lock(bp);
19680c48 1980 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1981 bnx2x_release_phy_lock(bp);
a2fbb9ea 1982
3c96c68b
EG
1983 bnx2x_calc_fc_adv(bp);
1984
19680c48
EG
1985 if (bp->link_vars.link_up)
1986 bnx2x_link_report(bp);
a2fbb9ea 1987
34f80b04 1988
19680c48
EG
1989 return rc;
1990 }
1991 BNX2X_ERR("Bootcode is missing -not initializing link\n");
1992 return -EINVAL;
a2fbb9ea
ET
1993}
1994
c18487ee 1995static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1996{
19680c48 1997 if (!BP_NOMCP(bp)) {
4a37fb66 1998 bnx2x_acquire_phy_lock(bp);
19680c48 1999 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2000 bnx2x_release_phy_lock(bp);
a2fbb9ea 2001
19680c48
EG
2002 bnx2x_calc_fc_adv(bp);
2003 } else
2004 BNX2X_ERR("Bootcode is missing -not setting link\n");
c18487ee 2005}
a2fbb9ea 2006
c18487ee
YR
2007static void bnx2x__link_reset(struct bnx2x *bp)
2008{
19680c48 2009 if (!BP_NOMCP(bp)) {
4a37fb66 2010 bnx2x_acquire_phy_lock(bp);
19680c48 2011 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
4a37fb66 2012 bnx2x_release_phy_lock(bp);
19680c48
EG
2013 } else
2014 BNX2X_ERR("Bootcode is missing -not resetting link\n");
c18487ee 2015}
a2fbb9ea 2016
c18487ee
YR
2017static u8 bnx2x_link_test(struct bnx2x *bp)
2018{
2019 u8 rc;
a2fbb9ea 2020
4a37fb66 2021 bnx2x_acquire_phy_lock(bp);
c18487ee 2022 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2023 bnx2x_release_phy_lock(bp);
a2fbb9ea 2024
c18487ee
YR
2025 return rc;
2026}
a2fbb9ea 2027
34f80b04
EG
2028/* Calculates the sum of vn_min_rates.
2029 It's needed for further normalizing of the min_rates.
2030
2031 Returns:
2032 sum of vn_min_rates
2033 or
2034 0 - if all the min_rates are 0.
33471629 2035 In the latter case the fairness algorithm should be deactivated.
34f80b04
EG
2036 If not all min_rates are zero then those that are zeroes will
2037 be set to 1.
2038 */
2039static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2040{
2041 int i, port = BP_PORT(bp);
2042 u32 wsum = 0;
2043 int all_zero = 1;
2044
2045 for (i = 0; i < E1HVN_MAX; i++) {
2046 u32 vn_cfg =
2047 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2048 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2049 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2050 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2051 /* If min rate is zero - set it to 1 */
2052 if (!vn_min_rate)
2053 vn_min_rate = DEF_MIN_RATE;
2054 else
2055 all_zero = 0;
2056
2057 wsum += vn_min_rate;
2058 }
2059 }
2060
2061 /* ... only if all min rates are zeros - disable FAIRNESS */
2062 if (all_zero)
2063 return 0;
2064
2065 return wsum;
2066}
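/* Worked example: assume three visible VNs whose MIN_BW fields hold
   0, 25 and 50. The scaled rates are 0, 2500 and 5000; the zero entry
   is bumped to DEF_MIN_RATE, so wsum = DEF_MIN_RATE + 7500 is
   returned. Only if every field were 0 would the function return 0
   and fairness be switched off. */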
2067
2068static void bnx2x_init_port_minmax(struct bnx2x *bp,
2069 int en_fness,
2070 u16 port_rate,
2071 struct cmng_struct_per_port *m_cmng_port)
2072{
2073 u32 r_param = port_rate / 8;
2074 int port = BP_PORT(bp);
2075 int i;
2076
2077 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2078
2079 /* Enable minmax only if we are in e1hmf mode */
2080 if (IS_E1HMF(bp)) {
2081 u32 fair_periodic_timeout_usec;
2082 u32 t_fair;
2083
2084 /* Enable rate shaping and fairness */
2085 m_cmng_port->flags.cmng_vn_enable = 1;
2086 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2087 m_cmng_port->flags.rate_shaping_enable = 1;
2088
2089 if (!en_fness)
 2090 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
 2091 " fairness will be disabled\n");
2092
2093 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2094 m_cmng_port->rs_vars.rs_periodic_timeout =
2095 RS_PERIODIC_TIMEOUT_USEC / 4;
2096
 2097 /* this is the threshold below which no timer arming will occur;
 2098 the 1.25 coefficient makes the threshold a little bigger
 2099 than the real time, to compensate for timer inaccuracy */
2100 m_cmng_port->rs_vars.rs_threshold =
2101 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2102
2103 /* resolution of fairness timer */
2104 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2105 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2106 t_fair = T_FAIR_COEF / port_rate;
2107
2108 /* this is the threshold below which we won't arm
2109 the timer anymore */
2110 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2111
 2112 /* we multiply by 1e3/8 to get bytes/msec.
 2113 We don't want the credits to exceed
 2114 T_FAIR*FAIR_MEM (the algorithm resolution) */
2115 m_cmng_port->fair_vars.upper_bound =
2116 r_param * t_fair * FAIR_MEM;
2117 /* since each tick is 4 usec */
2118 m_cmng_port->fair_vars.fairness_timeout =
2119 fair_periodic_timeout_usec / 4;
2120
2121 } else {
2122 /* Disable rate shaping and fairness */
2123 m_cmng_port->flags.cmng_vn_enable = 0;
2124 m_cmng_port->flags.fairness_enable = 0;
2125 m_cmng_port->flags.rate_shaping_enable = 0;
2126
2127 DP(NETIF_MSG_IFUP,
2128 "Single function mode minmax will be disabled\n");
2129 }
2130
2131 /* Store it to internal memory */
2132 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2133 REG_WR(bp, BAR_XSTRORM_INTMEM +
2134 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2135 ((u32 *)(m_cmng_port))[i]);
2136}
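/* Worked example for a 10G port: port_rate = 10000 Mbps gives
   r_param = 1250 bytes/usec, rs_threshold =
   RS_PERIODIC_TIMEOUT_USEC * 1250 * 5/4 bytes, and t_fair =
   T_FAIR_COEF / 10000, which per the comment above comes out to
   1000 usec (10000 usec on a 1G port). */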
2137
2138static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2139 u32 wsum, u16 port_rate,
2140 struct cmng_struct_per_port *m_cmng_port)
2141{
2142 struct rate_shaping_vars_per_vn m_rs_vn;
2143 struct fairness_vars_per_vn m_fair_vn;
2144 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2145 u16 vn_min_rate, vn_max_rate;
2146 int i;
2147
2148 /* If function is hidden - set min and max to zeroes */
2149 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2150 vn_min_rate = 0;
2151 vn_max_rate = 0;
2152
2153 } else {
2154 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2155 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2156 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2157 if current min rate is zero - set it to 1.
33471629 2158 This is a requirement of the algorithm. */
34f80b04
EG
2159 if ((vn_min_rate == 0) && wsum)
2160 vn_min_rate = DEF_MIN_RATE;
2161 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2162 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2163 }
2164
2165 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2166 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2167
2168 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2169 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2170
2171 /* global vn counter - maximal Mbps for this vn */
2172 m_rs_vn.vn_counter.rate = vn_max_rate;
2173
2174 /* quota - number of bytes transmitted in this period */
2175 m_rs_vn.vn_counter.quota =
2176 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2177
2178#ifdef BNX2X_PER_PROT_QOS
2179 /* per protocol counter */
2180 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2181 /* maximal Mbps for this protocol */
2182 m_rs_vn.protocol_counters[protocol].rate =
2183 protocol_max_rate[protocol];
2184 /* the quota in each timer period -
2185 number of bytes transmitted in this period */
2186 m_rs_vn.protocol_counters[protocol].quota =
2187 (u32)(rs_periodic_timeout_usec *
2188 ((double)m_rs_vn.
2189 protocol_counters[protocol].rate/8));
2190 }
2191#endif
2192
2193 if (wsum) {
2194 /* credit for each period of the fairness algorithm:
 2195 number of bytes in T_FAIR (the VNs share the port rate).
 2196 wsum should not be larger than 10000, thus
 2197 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2198 m_fair_vn.vn_credit_delta =
2199 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2200 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2201 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2202 m_fair_vn.vn_credit_delta);
2203 }
2204
2205#ifdef BNX2X_PER_PROT_QOS
2206 do {
2207 u32 protocolWeightSum = 0;
2208
2209 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2210 protocolWeightSum +=
2211 drvInit.protocol_min_rate[protocol];
2212 /* per protocol counter -
2213 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2214 if (protocolWeightSum > 0) {
2215 for (protocol = 0;
2216 protocol < NUM_OF_PROTOCOLS; protocol++)
2217 /* credit for each period of the
2218 fairness algorithm - number of bytes in
 2219 T_FAIR (the protocols share the vn rate) */
2220 m_fair_vn.protocol_credit_delta[protocol] =
2221 (u32)((vn_min_rate / 8) * t_fair *
2222 protocol_min_rate / protocolWeightSum);
2223 }
2224 } while (0);
2225#endif
2226
2227 /* Store it to internal memory */
2228 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2229 REG_WR(bp, BAR_XSTRORM_INTMEM +
2230 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2231 ((u32 *)(&m_rs_vn))[i]);
2232
2233 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2234 REG_WR(bp, BAR_XSTRORM_INTMEM +
2235 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2236 ((u32 *)(&m_fair_vn))[i]);
2237}
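/* Worked example for the credit bound above: with vn_min_rate = 2500
   and wsum = 10000, the first max() argument is
   2500 * (T_FAIR_COEF / 80000) bytes per fairness period; the
   fair_threshold * 2 floor only wins when a VN's share of the period
   drops below the algorithm's resolution. */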
2238
c18487ee
YR
2239/* This function is called upon link interrupt */
2240static void bnx2x_link_attn(struct bnx2x *bp)
2241{
34f80b04
EG
2242 int vn;
2243
bb2a0f7a
YG
2244 /* Make sure that we are synced with the current statistics */
2245 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2246
c18487ee 2247 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2248
bb2a0f7a
YG
2249 if (bp->link_vars.link_up) {
2250
2251 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2252 struct host_port_stats *pstats;
2253
2254 pstats = bnx2x_sp(bp, port_stats);
2255 /* reset old bmac stats */
2256 memset(&(pstats->mac_stx[0]), 0,
2257 sizeof(struct mac_stx));
2258 }
2259 if ((bp->state == BNX2X_STATE_OPEN) ||
2260 (bp->state == BNX2X_STATE_DISABLED))
2261 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2262 }
2263
c18487ee
YR
2264 /* indicate link status */
2265 bnx2x_link_report(bp);
34f80b04
EG
2266
2267 if (IS_E1HMF(bp)) {
2268 int func;
2269
2270 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2271 if (vn == BP_E1HVN(bp))
2272 continue;
2273
2274 func = ((vn << 1) | BP_PORT(bp));
2275
2276 /* Set the attention towards other drivers
2277 on the same port */
2278 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2279 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2280 }
2281 }
2282
2283 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2284 struct cmng_struct_per_port m_cmng_port;
2285 u32 wsum;
2286 int port = BP_PORT(bp);
2287
2288 /* Init RATE SHAPING and FAIRNESS contexts */
2289 wsum = bnx2x_calc_vn_wsum(bp);
2290 bnx2x_init_port_minmax(bp, (int)wsum,
2291 bp->link_vars.line_speed,
2292 &m_cmng_port);
2293 if (IS_E1HMF(bp))
2294 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2295 bnx2x_init_vn_minmax(bp, 2*vn + port,
2296 wsum, bp->link_vars.line_speed,
2297 &m_cmng_port);
2298 }
c18487ee 2299}
a2fbb9ea 2300
c18487ee
YR
2301static void bnx2x__link_status_update(struct bnx2x *bp)
2302{
2303 if (bp->state != BNX2X_STATE_OPEN)
2304 return;
a2fbb9ea 2305
c18487ee 2306 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2307
bb2a0f7a
YG
2308 if (bp->link_vars.link_up)
2309 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2310 else
2311 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2312
c18487ee
YR
2313 /* indicate link status */
2314 bnx2x_link_report(bp);
a2fbb9ea 2315}
a2fbb9ea 2316
34f80b04
EG
2317static void bnx2x_pmf_update(struct bnx2x *bp)
2318{
2319 int port = BP_PORT(bp);
2320 u32 val;
2321
2322 bp->port.pmf = 1;
2323 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2324
2325 /* enable nig attention */
2326 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2327 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2328 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2329
2330 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2331}
2332
c18487ee 2333/* end of Link */
a2fbb9ea
ET
2334
2335/* slow path */
2336
2337/*
2338 * General service functions
2339 */
2340
2341/* the slow path queue is odd since completions arrive on the fastpath ring */
2342static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2343 u32 data_hi, u32 data_lo, int common)
2344{
34f80b04 2345 int func = BP_FUNC(bp);
a2fbb9ea 2346
34f80b04
EG
2347 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2348 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2349 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2350 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2351 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2352
2353#ifdef BNX2X_STOP_ON_ERROR
2354 if (unlikely(bp->panic))
2355 return -EIO;
2356#endif
2357
34f80b04 2358 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2359
2360 if (!bp->spq_left) {
2361 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2362 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2363 bnx2x_panic();
2364 return -EBUSY;
2365 }
f1410647 2366
a2fbb9ea
ET
 2367 /* CID needs the port number to be encoded in it */
2368 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2369 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2370 HW_CID(bp, cid)));
2371 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2372 if (common)
2373 bp->spq_prod_bd->hdr.type |=
2374 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2375
2376 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2377 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2378
2379 bp->spq_left--;
2380
2381 if (bp->spq_prod_bd == bp->spq_last_bd) {
2382 bp->spq_prod_bd = bp->spq;
2383 bp->spq_prod_idx = 0;
2384 DP(NETIF_MSG_TIMER, "end of spq\n");
2385
2386 } else {
2387 bp->spq_prod_bd++;
2388 bp->spq_prod_idx++;
2389 }
2390
34f80b04 2391 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2392 bp->spq_prod_idx);
2393
34f80b04 2394 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2395 return 0;
2396}
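/* Minimal usage sketch (assumed, mirroring bnx2x_storm_stats_post()
   further down): a non-common ETH ramrod is posted on CID 0 with its
   64-bit data split into high and low halves, and a non-zero return
   means the SPQ was full.

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
			   data_hi, data_lo, 0);
*/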
2397
2398/* acquire split MCP access lock register */
4a37fb66 2399static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2400{
a2fbb9ea 2401 u32 i, j, val;
34f80b04 2402 int rc = 0;
a2fbb9ea
ET
2403
2404 might_sleep();
2405 i = 100;
2406 for (j = 0; j < i*10; j++) {
2407 val = (1UL << 31);
2408 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2409 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2410 if (val & (1L << 31))
2411 break;
2412
2413 msleep(5);
2414 }
a2fbb9ea 2415 if (!(val & (1L << 31))) {
19680c48 2416 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2417 rc = -EBUSY;
2418 }
2419
2420 return rc;
2421}
2422
4a37fb66
YG
2423/* release split MCP access lock register */
2424static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2425{
2426 u32 val = 0;
2427
2428 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2429}
2430
2431static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2432{
2433 struct host_def_status_block *def_sb = bp->def_status_blk;
2434 u16 rc = 0;
2435
2436 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2437 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2438 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2439 rc |= 1;
2440 }
2441 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2442 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2443 rc |= 2;
2444 }
2445 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2446 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2447 rc |= 4;
2448 }
2449 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2450 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2451 rc |= 8;
2452 }
2453 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2454 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2455 rc |= 16;
2456 }
2457 return rc;
2458}
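/* The returned mask encodes which default status block indices moved:
   bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
   bit 3 - XSTORM, bit 4 - TSTORM. bnx2x_sp_task() below keys off
   bit 0 (HW attentions) and bit 1 (CStorm events). */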
2459
2460/*
2461 * slow path service functions
2462 */
2463
2464static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2465{
34f80b04 2466 int port = BP_PORT(bp);
5c862848
EG
2467 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2468 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2469 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2470 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2471 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2472 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2473 u32 aeu_mask;
a2fbb9ea 2474
a2fbb9ea
ET
2475 if (bp->attn_state & asserted)
2476 BNX2X_ERR("IGU ERROR\n");
2477
3fcaf2e5
EG
2478 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2479 aeu_mask = REG_RD(bp, aeu_addr);
2480
a2fbb9ea 2481 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2482 aeu_mask, asserted);
2483 aeu_mask &= ~(asserted & 0xff);
2484 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2485
3fcaf2e5
EG
2486 REG_WR(bp, aeu_addr, aeu_mask);
2487 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2488
3fcaf2e5 2489 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2490 bp->attn_state |= asserted;
3fcaf2e5 2491 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2492
2493 if (asserted & ATTN_HARD_WIRED_MASK) {
2494 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2495
a5e9a7cf
EG
2496 bnx2x_acquire_phy_lock(bp);
2497
877e9aa4
ET
2498 /* save nig interrupt mask */
2499 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2500 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2501
c18487ee 2502 bnx2x_link_attn(bp);
a2fbb9ea
ET
2503
2504 /* handle unicore attn? */
2505 }
2506 if (asserted & ATTN_SW_TIMER_4_FUNC)
2507 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2508
2509 if (asserted & GPIO_2_FUNC)
2510 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2511
2512 if (asserted & GPIO_3_FUNC)
2513 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2514
2515 if (asserted & GPIO_4_FUNC)
2516 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2517
2518 if (port == 0) {
2519 if (asserted & ATTN_GENERAL_ATTN_1) {
2520 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2521 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2522 }
2523 if (asserted & ATTN_GENERAL_ATTN_2) {
2524 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2525 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2526 }
2527 if (asserted & ATTN_GENERAL_ATTN_3) {
2528 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2529 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2530 }
2531 } else {
2532 if (asserted & ATTN_GENERAL_ATTN_4) {
2533 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2534 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2535 }
2536 if (asserted & ATTN_GENERAL_ATTN_5) {
2537 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2538 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2539 }
2540 if (asserted & ATTN_GENERAL_ATTN_6) {
2541 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2542 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2543 }
2544 }
2545
2546 } /* if hardwired */
2547
5c862848
EG
2548 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2549 asserted, hc_addr);
2550 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2551
2552 /* now set back the mask */
a5e9a7cf 2553 if (asserted & ATTN_NIG_FOR_FUNC) {
877e9aa4 2554 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
a5e9a7cf
EG
2555 bnx2x_release_phy_lock(bp);
2556 }
a2fbb9ea
ET
2557}
2558
877e9aa4 2559static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2560{
34f80b04 2561 int port = BP_PORT(bp);
877e9aa4
ET
2562 int reg_offset;
2563 u32 val;
2564
34f80b04
EG
2565 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2566 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2567
34f80b04 2568 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2569
2570 val = REG_RD(bp, reg_offset);
2571 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2572 REG_WR(bp, reg_offset, val);
2573
2574 BNX2X_ERR("SPIO5 hw attention\n");
2575
34f80b04 2576 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 2577 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
877e9aa4
ET
2578 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2579 /* Fan failure attention */
2580
17de50b7 2581 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2582 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2583 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2584 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2585 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2586 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2587 /* mark the failure */
c18487ee 2588 bp->link_params.ext_phy_config &=
877e9aa4 2589 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2590 bp->link_params.ext_phy_config |=
877e9aa4
ET
2591 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2592 SHMEM_WR(bp,
2593 dev_info.port_hw_config[port].
2594 external_phy_config,
c18487ee 2595 bp->link_params.ext_phy_config);
877e9aa4
ET
2596 /* log the failure */
2597 printk(KERN_ERR PFX "Fan Failure on Network"
2598 " Controller %s has caused the driver to"
2599 " shutdown the card to prevent permanent"
2600 " damage. Please contact Dell Support for"
2601 " assistance\n", bp->dev->name);
2602 break;
2603
2604 default:
2605 break;
2606 }
2607 }
34f80b04
EG
2608
2609 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2610
2611 val = REG_RD(bp, reg_offset);
2612 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2613 REG_WR(bp, reg_offset, val);
2614
2615 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2616 (attn & HW_INTERRUT_ASSERT_SET_0));
2617 bnx2x_panic();
2618 }
877e9aa4
ET
2619}
2620
2621static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2622{
2623 u32 val;
2624
2625 if (attn & BNX2X_DOORQ_ASSERT) {
2626
2627 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2628 BNX2X_ERR("DB hw attention 0x%x\n", val);
2629 /* DORQ discard attention */
2630 if (val & 0x2)
2631 BNX2X_ERR("FATAL error from DORQ\n");
2632 }
34f80b04
EG
2633
2634 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2635
2636 int port = BP_PORT(bp);
2637 int reg_offset;
2638
2639 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2640 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2641
2642 val = REG_RD(bp, reg_offset);
2643 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2644 REG_WR(bp, reg_offset, val);
2645
2646 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2647 (attn & HW_INTERRUT_ASSERT_SET_1));
2648 bnx2x_panic();
2649 }
877e9aa4
ET
2650}
2651
2652static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2653{
2654 u32 val;
2655
2656 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2657
2658 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2659 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2660 /* CFC error attention */
2661 if (val & 0x2)
2662 BNX2X_ERR("FATAL error from CFC\n");
2663 }
2664
2665 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2666
2667 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2668 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2669 /* RQ_USDMDP_FIFO_OVERFLOW */
2670 if (val & 0x18000)
2671 BNX2X_ERR("FATAL error from PXP\n");
2672 }
34f80b04
EG
2673
2674 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2675
2676 int port = BP_PORT(bp);
2677 int reg_offset;
2678
2679 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2680 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2681
2682 val = REG_RD(bp, reg_offset);
2683 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2684 REG_WR(bp, reg_offset, val);
2685
2686 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2687 (attn & HW_INTERRUT_ASSERT_SET_2));
2688 bnx2x_panic();
2689 }
877e9aa4
ET
2690}
2691
2692static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2693{
34f80b04
EG
2694 u32 val;
2695
877e9aa4
ET
2696 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2697
34f80b04
EG
2698 if (attn & BNX2X_PMF_LINK_ASSERT) {
2699 int func = BP_FUNC(bp);
2700
2701 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2702 bnx2x__link_status_update(bp);
2703 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2704 DRV_STATUS_PMF)
2705 bnx2x_pmf_update(bp);
2706
2707 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2708
2709 BNX2X_ERR("MC assert!\n");
2710 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2711 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2712 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2713 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2714 bnx2x_panic();
2715
2716 } else if (attn & BNX2X_MCP_ASSERT) {
2717
2718 BNX2X_ERR("MCP assert!\n");
2719 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2720 bnx2x_fw_dump(bp);
877e9aa4
ET
2721
2722 } else
2723 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2724 }
2725
2726 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2727 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2728 if (attn & BNX2X_GRC_TIMEOUT) {
2729 val = CHIP_IS_E1H(bp) ?
2730 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2731 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2732 }
2733 if (attn & BNX2X_GRC_RSV) {
2734 val = CHIP_IS_E1H(bp) ?
2735 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2736 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2737 }
877e9aa4 2738 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2739 }
2740}
2741
2742static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2743{
a2fbb9ea
ET
2744 struct attn_route attn;
2745 struct attn_route group_mask;
34f80b04 2746 int port = BP_PORT(bp);
877e9aa4 2747 int index;
a2fbb9ea
ET
2748 u32 reg_addr;
2749 u32 val;
3fcaf2e5 2750 u32 aeu_mask;
a2fbb9ea
ET
2751
2752 /* need to take HW lock because MCP or other port might also
2753 try to handle this event */
4a37fb66 2754 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2755
2756 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2757 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2758 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2759 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2760 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2761 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2762
2763 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2764 if (deasserted & (1 << index)) {
2765 group_mask = bp->attn_group[index];
2766
34f80b04
EG
2767 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2768 index, group_mask.sig[0], group_mask.sig[1],
2769 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2770
877e9aa4
ET
2771 bnx2x_attn_int_deasserted3(bp,
2772 attn.sig[3] & group_mask.sig[3]);
2773 bnx2x_attn_int_deasserted1(bp,
2774 attn.sig[1] & group_mask.sig[1]);
2775 bnx2x_attn_int_deasserted2(bp,
2776 attn.sig[2] & group_mask.sig[2]);
2777 bnx2x_attn_int_deasserted0(bp,
2778 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2779
a2fbb9ea
ET
2780 if ((attn.sig[0] & group_mask.sig[0] &
2781 HW_PRTY_ASSERT_SET_0) ||
2782 (attn.sig[1] & group_mask.sig[1] &
2783 HW_PRTY_ASSERT_SET_1) ||
2784 (attn.sig[2] & group_mask.sig[2] &
2785 HW_PRTY_ASSERT_SET_2))
6378c025 2786 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2787 }
2788 }
2789
4a37fb66 2790 bnx2x_release_alr(bp);
a2fbb9ea 2791
5c862848 2792 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2793
2794 val = ~deasserted;
3fcaf2e5
EG
2795 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2796 val, reg_addr);
5c862848 2797 REG_WR(bp, reg_addr, val);
a2fbb9ea 2798
a2fbb9ea 2799 if (~bp->attn_state & deasserted)
3fcaf2e5 2800 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2801
2802 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2803 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2804
3fcaf2e5
EG
2805 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2806 aeu_mask = REG_RD(bp, reg_addr);
2807
2808 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2809 aeu_mask, deasserted);
2810 aeu_mask |= (deasserted & 0xff);
2811 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2812
3fcaf2e5
EG
2813 REG_WR(bp, reg_addr, aeu_mask);
2814 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2815
2816 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2817 bp->attn_state &= ~deasserted;
2818 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2819}
2820
2821static void bnx2x_attn_int(struct bnx2x *bp)
2822{
2823 /* read local copy of bits */
68d59484
EG
2824 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2825 attn_bits);
2826 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2827 attn_bits_ack);
a2fbb9ea
ET
2828 u32 attn_state = bp->attn_state;
2829
2830 /* look for changed bits */
2831 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2832 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2833
2834 DP(NETIF_MSG_HW,
2835 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2836 attn_bits, attn_ack, asserted, deasserted);
2837
2838 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2839 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2840
2841 /* handle bits that were raised */
2842 if (asserted)
2843 bnx2x_attn_int_asserted(bp, asserted);
2844
2845 if (deasserted)
2846 bnx2x_attn_int_deasserted(bp, deasserted);
2847}
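/* Worked example of the edge detection above: with attn_bits = 0x5,
   attn_ack = 0x4 and attn_state = 0x4, asserted = 0x5 & ~0x4 & ~0x4
   = 0x1 (bit 0 newly raised) and deasserted = ~0x5 & 0x4 & 0x4 = 0,
   so only the assertion path runs until the ack catches up. */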
2848
2849static void bnx2x_sp_task(struct work_struct *work)
2850{
1cf167f2 2851 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2852 u16 status;
2853
34f80b04 2854
a2fbb9ea
ET
2855 /* Return here if interrupt is disabled */
2856 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2857 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2858 return;
2859 }
2860
2861 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2862/* if (status == 0) */
2863/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2864
3196a88a 2865 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2866
877e9aa4
ET
2867 /* HW attentions */
2868 if (status & 0x1)
a2fbb9ea 2869 bnx2x_attn_int(bp);
a2fbb9ea 2870
bb2a0f7a
YG
2871 /* CStorm events: query_stats, port delete ramrod */
2872 if (status & 0x2)
2873 bp->stats_pending = 0;
2874
68d59484 2875 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2876 IGU_INT_NOP, 1);
2877 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2878 IGU_INT_NOP, 1);
2879 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2880 IGU_INT_NOP, 1);
2881 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2882 IGU_INT_NOP, 1);
2883 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2884 IGU_INT_ENABLE, 1);
877e9aa4 2885
a2fbb9ea
ET
2886}
2887
2888static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2889{
2890 struct net_device *dev = dev_instance;
2891 struct bnx2x *bp = netdev_priv(dev);
2892
2893 /* Return here if interrupt is disabled */
2894 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2895 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2896 return IRQ_HANDLED;
2897 }
2898
877e9aa4 2899 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2900
2901#ifdef BNX2X_STOP_ON_ERROR
2902 if (unlikely(bp->panic))
2903 return IRQ_HANDLED;
2904#endif
2905
1cf167f2 2906 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2907
2908 return IRQ_HANDLED;
2909}
2910
2911/* end of slow path */
2912
2913/* Statistics */
2914
2915/****************************************************************************
2916* Macros
2917****************************************************************************/
2918
a2fbb9ea
ET
2919/* sum[hi:lo] += add[hi:lo] */
2920#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2921 do { \
2922 s_lo += a_lo; \
f5ba6772 2923 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
2924 } while (0)
2925
2926/* difference = minuend - subtrahend */
2927#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2928 do { \
bb2a0f7a
YG
2929 if (m_lo < s_lo) { \
2930 /* underflow */ \
a2fbb9ea 2931 d_hi = m_hi - s_hi; \
bb2a0f7a 2932 if (d_hi > 0) { \
6378c025 2933 /* we can borrow 1 */
a2fbb9ea
ET
2934 d_hi--; \
2935 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2936 } else { \
6378c025 2937 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2938 d_hi = 0; \
2939 d_lo = 0; \
2940 } \
bb2a0f7a
YG
2941 } else { \
2942 /* m_lo >= s_lo */ \
a2fbb9ea 2943 if (m_hi < s_hi) { \
bb2a0f7a
YG
2944 d_hi = 0; \
2945 d_lo = 0; \
2946 } else { \
6378c025 2947 /* m_hi >= s_hi */ \
bb2a0f7a
YG
2948 d_hi = m_hi - s_hi; \
2949 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2950 } \
2951 } \
2952 } while (0)
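/* Worked example of the borrow logic in DIFF_64: subtracting
   s = {hi 0, lo 1} from m = {hi 1, lo 0} takes the m_lo < s_lo branch
   with d_hi > 0, so d_hi becomes 0 and
   d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff, the correct 64-bit
   result 2^32 - 1. */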
2953
bb2a0f7a 2954#define UPDATE_STAT64(s, t) \
a2fbb9ea 2955 do { \
bb2a0f7a
YG
2956 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2957 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2958 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2959 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2960 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2961 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2962 } while (0)
2963
bb2a0f7a 2964#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2965 do { \
bb2a0f7a
YG
2966 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2967 diff.lo, new->s##_lo, old->s##_lo); \
2968 ADD_64(estats->t##_hi, diff.hi, \
2969 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2970 } while (0)
2971
2972/* sum[hi:lo] += add */
2973#define ADD_EXTEND_64(s_hi, s_lo, a) \
2974 do { \
2975 s_lo += a; \
2976 s_hi += (s_lo < a) ? 1 : 0; \
2977 } while (0)
2978
bb2a0f7a 2979#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2980 do { \
bb2a0f7a
YG
2981 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2982 pstats->mac_stx[1].s##_lo, \
2983 new->s); \
a2fbb9ea
ET
2984 } while (0)
2985
bb2a0f7a 2986#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
2987 do { \
2988 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2989 old_tclient->s = le32_to_cpu(tclient->s); \
bb2a0f7a
YG
2990 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2991 } while (0)
2992
2993#define UPDATE_EXTEND_XSTAT(s, t) \
2994 do { \
2995 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2996 old_xclient->s = le32_to_cpu(xclient->s); \
2997 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
a2fbb9ea
ET
2998 } while (0)
2999
3000/*
3001 * General service functions
3002 */
3003
3004static inline long bnx2x_hilo(u32 *hiref)
3005{
3006 u32 lo = *(hiref + 1);
3007#if (BITS_PER_LONG == 64)
3008 u32 hi = *hiref;
3009
3010 return HILO_U64(hi, lo);
3011#else
3012 return lo;
3013#endif
3014}
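/* Example: with the counter stored as hi = 0x1, lo = 0x2 (hiref
   points at the high word), bnx2x_hilo() yields 0x100000002 on a
   64-bit kernel but only the low word 0x2 where long is 32 bits. */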
3015
3016/*
3017 * Init service functions
3018 */
3019
bb2a0f7a
YG
3020static void bnx2x_storm_stats_post(struct bnx2x *bp)
3021{
3022 if (!bp->stats_pending) {
3023 struct eth_query_ramrod_data ramrod_data = {0};
3024 int rc;
3025
3026 ramrod_data.drv_counter = bp->stats_counter++;
3027 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3028 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3029
3030 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3031 ((u32 *)&ramrod_data)[1],
3032 ((u32 *)&ramrod_data)[0], 0);
3033 if (rc == 0) {
 3034 /* stats ramrod has its own slot on the spq */
3035 bp->spq_left++;
3036 bp->stats_pending = 1;
3037 }
3038 }
3039}
3040
3041static void bnx2x_stats_init(struct bnx2x *bp)
3042{
3043 int port = BP_PORT(bp);
3044
3045 bp->executer_idx = 0;
3046 bp->stats_counter = 0;
3047
3048 /* port stats */
3049 if (!BP_NOMCP(bp))
3050 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3051 else
3052 bp->port.port_stx = 0;
3053 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3054
3055 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3056 bp->port.old_nig_stats.brb_discard =
3057 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3058 bp->port.old_nig_stats.brb_truncate =
3059 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3060 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3061 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3062 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3063 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3064
3065 /* function stats */
3066 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3067 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3068 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3069 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3070
3071 bp->stats_state = STATS_STATE_DISABLED;
3072 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3073 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3074}
3075
3076static void bnx2x_hw_stats_post(struct bnx2x *bp)
3077{
3078 struct dmae_command *dmae = &bp->stats_dmae;
3079 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3080
3081 *stats_comp = DMAE_COMP_VAL;
3082
3083 /* loader */
3084 if (bp->executer_idx) {
3085 int loader_idx = PMF_DMAE_C(bp);
3086
3087 memset(dmae, 0, sizeof(struct dmae_command));
3088
3089 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3090 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3091 DMAE_CMD_DST_RESET |
3092#ifdef __BIG_ENDIAN
3093 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3094#else
3095 DMAE_CMD_ENDIANITY_DW_SWAP |
3096#endif
3097 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3098 DMAE_CMD_PORT_0) |
3099 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3100 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3101 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3102 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3103 sizeof(struct dmae_command) *
3104 (loader_idx + 1)) >> 2;
3105 dmae->dst_addr_hi = 0;
3106 dmae->len = sizeof(struct dmae_command) >> 2;
3107 if (CHIP_IS_E1(bp))
3108 dmae->len--;
3109 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3110 dmae->comp_addr_hi = 0;
3111 dmae->comp_val = 1;
3112
3113 *stats_comp = 0;
3114 bnx2x_post_dmae(bp, dmae, loader_idx);
3115
3116 } else if (bp->func_stx) {
3117 *stats_comp = 0;
3118 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3119 }
3120}
3121
3122static int bnx2x_stats_comp(struct bnx2x *bp)
3123{
3124 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3125 int cnt = 10;
3126
3127 might_sleep();
3128 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3129 if (!cnt) {
3130 BNX2X_ERR("timeout waiting for stats finished\n");
3131 break;
3132 }
3133 cnt--;
12469401 3134 msleep(1);
bb2a0f7a
YG
3135 }
3136 return 1;
3137}
3138
3139/*
3140 * Statistics service functions
3141 */
3142
3143static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3144{
3145 struct dmae_command *dmae;
3146 u32 opcode;
3147 int loader_idx = PMF_DMAE_C(bp);
3148 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3149
3150 /* sanity */
3151 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3152 BNX2X_ERR("BUG!\n");
3153 return;
3154 }
3155
3156 bp->executer_idx = 0;
3157
3158 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3159 DMAE_CMD_C_ENABLE |
3160 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3161#ifdef __BIG_ENDIAN
3162 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3163#else
3164 DMAE_CMD_ENDIANITY_DW_SWAP |
3165#endif
3166 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3167 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3168
3169 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3170 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3171 dmae->src_addr_lo = bp->port.port_stx >> 2;
3172 dmae->src_addr_hi = 0;
3173 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3174 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3175 dmae->len = DMAE_LEN32_RD_MAX;
3176 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3177 dmae->comp_addr_hi = 0;
3178 dmae->comp_val = 1;
3179
3180 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3181 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3182 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3183 dmae->src_addr_hi = 0;
7a9b2557
VZ
3184 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3185 DMAE_LEN32_RD_MAX * 4);
3186 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3187 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3188 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3189 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3190 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3191 dmae->comp_val = DMAE_COMP_VAL;
3192
3193 *stats_comp = 0;
3194 bnx2x_hw_stats_post(bp);
3195 bnx2x_stats_comp(bp);
3196}
3197
3198static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3199{
3200 struct dmae_command *dmae;
34f80b04 3201 int port = BP_PORT(bp);
bb2a0f7a 3202 int vn = BP_E1HVN(bp);
a2fbb9ea 3203 u32 opcode;
bb2a0f7a 3204 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3205 u32 mac_addr;
bb2a0f7a
YG
3206 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3207
3208 /* sanity */
3209 if (!bp->link_vars.link_up || !bp->port.pmf) {
3210 BNX2X_ERR("BUG!\n");
3211 return;
3212 }
a2fbb9ea
ET
3213
3214 bp->executer_idx = 0;
bb2a0f7a
YG
3215
3216 /* MCP */
3217 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3218 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3219 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3220#ifdef __BIG_ENDIAN
bb2a0f7a 3221 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3222#else
bb2a0f7a 3223 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3224#endif
bb2a0f7a
YG
3225 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3226 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3227
bb2a0f7a 3228 if (bp->port.port_stx) {
a2fbb9ea
ET
3229
3230 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3231 dmae->opcode = opcode;
bb2a0f7a
YG
3232 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3233 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3234 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3235 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3236 dmae->len = sizeof(struct host_port_stats) >> 2;
3237 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3238 dmae->comp_addr_hi = 0;
3239 dmae->comp_val = 1;
a2fbb9ea
ET
3240 }
3241
bb2a0f7a
YG
3242 if (bp->func_stx) {
3243
3244 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3245 dmae->opcode = opcode;
3246 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3247 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3248 dmae->dst_addr_lo = bp->func_stx >> 2;
3249 dmae->dst_addr_hi = 0;
3250 dmae->len = sizeof(struct host_func_stats) >> 2;
3251 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3252 dmae->comp_addr_hi = 0;
3253 dmae->comp_val = 1;
a2fbb9ea
ET
3254 }
3255
bb2a0f7a 3256 /* MAC */
a2fbb9ea
ET
3257 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3258 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3259 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3260#ifdef __BIG_ENDIAN
3261 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3262#else
3263 DMAE_CMD_ENDIANITY_DW_SWAP |
3264#endif
bb2a0f7a
YG
3265 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3266 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3267
c18487ee 3268 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3269
3270 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3271 NIG_REG_INGRESS_BMAC0_MEM);
3272
3273 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3274 BIGMAC_REGISTER_TX_STAT_GTBYT */
3275 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3276 dmae->opcode = opcode;
3277 dmae->src_addr_lo = (mac_addr +
3278 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3279 dmae->src_addr_hi = 0;
3280 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3281 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3282 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3283 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3284 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3285 dmae->comp_addr_hi = 0;
3286 dmae->comp_val = 1;
3287
3288 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3289 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3290 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3291 dmae->opcode = opcode;
3292 dmae->src_addr_lo = (mac_addr +
3293 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3294 dmae->src_addr_hi = 0;
3295 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3296 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3297 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3298 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3299 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3300 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3301 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3302 dmae->comp_addr_hi = 0;
3303 dmae->comp_val = 1;
3304
c18487ee 3305 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3306
3307 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3308
3309 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3310 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3311 dmae->opcode = opcode;
3312 dmae->src_addr_lo = (mac_addr +
3313 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3314 dmae->src_addr_hi = 0;
3315 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3316 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3317 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3318 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3319 dmae->comp_addr_hi = 0;
3320 dmae->comp_val = 1;
3321
3322 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3323 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3324 dmae->opcode = opcode;
3325 dmae->src_addr_lo = (mac_addr +
3326 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3327 dmae->src_addr_hi = 0;
3328 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3329 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3330 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3331 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3332 dmae->len = 1;
3333 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3334 dmae->comp_addr_hi = 0;
3335 dmae->comp_val = 1;
3336
3337 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3338 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3339 dmae->opcode = opcode;
3340 dmae->src_addr_lo = (mac_addr +
3341 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3342 dmae->src_addr_hi = 0;
3343 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3344 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3345 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3346 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3347 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3348 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3349 dmae->comp_addr_hi = 0;
3350 dmae->comp_val = 1;
3351 }
3352
3353 /* NIG */
bb2a0f7a
YG
3354 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355 dmae->opcode = opcode;
3356 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3357 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3358 dmae->src_addr_hi = 0;
3359 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3360 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3361 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3362 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363 dmae->comp_addr_hi = 0;
3364 dmae->comp_val = 1;
3365
3366 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3367 dmae->opcode = opcode;
3368 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3369 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3370 dmae->src_addr_hi = 0;
3371 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3372 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3373 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3374 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3375 dmae->len = (2*sizeof(u32)) >> 2;
3376 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3377 dmae->comp_addr_hi = 0;
3378 dmae->comp_val = 1;
3379
a2fbb9ea
ET
3380 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3381 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3382 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3383 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3384#ifdef __BIG_ENDIAN
3385 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3386#else
3387 DMAE_CMD_ENDIANITY_DW_SWAP |
3388#endif
bb2a0f7a
YG
3389 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3390 (vn << DMAE_CMD_E1HVN_SHIFT));
3391 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3392 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3393 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3394 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3395 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3396 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3397 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3398 dmae->len = (2*sizeof(u32)) >> 2;
3399 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3400 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3401 dmae->comp_val = DMAE_COMP_VAL;
3402
3403 *stats_comp = 0;
a2fbb9ea
ET
3404}
3405
bb2a0f7a 3406static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3407{
bb2a0f7a
YG
3408 struct dmae_command *dmae = &bp->stats_dmae;
3409 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3410
bb2a0f7a
YG
3411 /* sanity */
3412 if (!bp->func_stx) {
3413 BNX2X_ERR("BUG!\n");
3414 return;
3415 }
a2fbb9ea 3416
bb2a0f7a
YG
3417 bp->executer_idx = 0;
3418 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3419
bb2a0f7a
YG
3420 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3421 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3422 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3423#ifdef __BIG_ENDIAN
3424 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3425#else
3426 DMAE_CMD_ENDIANITY_DW_SWAP |
3427#endif
3428 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3429 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3430 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3431 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3432 dmae->dst_addr_lo = bp->func_stx >> 2;
3433 dmae->dst_addr_hi = 0;
3434 dmae->len = sizeof(struct host_func_stats) >> 2;
3435 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3436 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3437 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3438
bb2a0f7a
YG
3439 *stats_comp = 0;
3440}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
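
/* MAC statistics update helpers: the raw BMAC/EMAC counters that were DMAEd
 * into the mac_stats area are folded into host_port_stats here. The
 * UPDATE_STAT64/UPDATE_EXTEND_STAT macros (driver-private, defined in
 * bnx2x.h) extend the hardware counters to 64-bit host counters.
 */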

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct regpair diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	return 0;
}
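
/* Storm (firmware) statistics arrive with a per-client stats_counter that
 * the firmware advances on every update, while bp->stats_counter tracks the
 * sequence expected by the driver; the checks below reject a snapshot the
 * firmware has not finished writing.
 */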

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	int cl_id = BP_CL_ID(bp);
	struct tstorm_per_port_stats *tport =
				&stats->tstorm_common.port_statistics;
	struct tstorm_per_client_stats *tclient =
			&stats->tstorm_common.client_statistics[cl_id];
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct xstorm_per_client_stats *xclient =
			&stats->xstorm_common.client_statistics[cl_id];
	struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	u32 diff;

	/* are storm stats valid? */
	if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
		   " tstorm counter (%d) != stats_counter (%d)\n",
		   tclient->stats_counter, bp->stats_counter);
		return -1;
	}
	if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
		   " xstorm counter (%d) != stats_counter (%d)\n",
		   xclient->stats_counter, bp->stats_counter);
		return -2;
	}

	fstats->total_bytes_received_hi =
	fstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
	fstats->total_bytes_received_lo =
	fstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

	estats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
	estats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(fstats->total_bytes_received_hi,
	       estats->error_bytes_received_hi,
	       fstats->total_bytes_received_lo,
	       estats->error_bytes_received_lo);

	UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);

	fstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
	fstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

	UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
	estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
	estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
	estats->mac_discard = le32_to_cpu(tport->mac_discard);

	old_tclient->rcv_unicast_bytes.hi =
				le32_to_cpu(tclient->rcv_unicast_bytes.hi);
	old_tclient->rcv_unicast_bytes.lo =
				le32_to_cpu(tclient->rcv_unicast_bytes.lo);
	old_tclient->rcv_broadcast_bytes.hi =
				le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
	old_tclient->rcv_broadcast_bytes.lo =
				le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
	old_tclient->rcv_multicast_bytes.hi =
				le32_to_cpu(tclient->rcv_multicast_bytes.hi);
	old_tclient->rcv_multicast_bytes.lo =
				le32_to_cpu(tclient->rcv_multicast_bytes.lo);
	old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);

	old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
	old_tclient->packets_too_big_discard =
				le32_to_cpu(tclient->packets_too_big_discard);
	estats->no_buff_discard =
	old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
	old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);

	old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
	old_xclient->unicast_bytes_sent.hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
	old_xclient->unicast_bytes_sent.lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);
	old_xclient->multicast_bytes_sent.hi =
				le32_to_cpu(xclient->multicast_bytes_sent.hi);
	old_xclient->multicast_bytes_sent.lo =
				le32_to_cpu(xclient->multicast_bytes_sent.lo);
	old_xclient->broadcast_bytes_sent.hi =
				le32_to_cpu(xclient->broadcast_bytes_sent.hi);
	old_xclient->broadcast_bytes_sent.lo =
				le32_to_cpu(xclient->broadcast_bytes_sent.lo);

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	return 0;
}
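
/* Map the accumulated driver statistics onto the standard net_device_stats
 * fields. bnx2x_hilo() folds a hi/lo counter pair into one value; the error
 * counters use only the low 32 bits.
 */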

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = old_tclient->checksum_discard +
			     estats->mac_discard;
	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);

	nstats->collisions =
			estats->tx_stat_dot3statssinglecollisionframes_lo +
			estats->tx_stat_dot3statsmultiplecollisionframes_lo +
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;

	estats->jabber_packets_received =
				old_tclient->packets_too_big_discard +
				estats->rx_stat_dot3statsframestoolong_lo;

	nstats->rx_length_errors =
				estats->rx_stat_etherstatsundersizepkts_lo +
				estats->jabber_packets_received;
	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;
	nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors;
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int update = 0;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update)
		bnx2x_net_stats_update(bp);

	else {
		if (bp->stats_pending) {
			bp->stats_pending++;
			if (bp->stats_pending == 3) {
				BNX2X_ERR("stats were not updated for 3 times\n");
				bnx2x_panic();
				return;
			}
		}
	}

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
		       estats->driver_xoff, estats->brb_drop_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %u  no_buff_discard %u  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxoverflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       old_tclient->checksum_discard,
		       old_tclient->packets_too_big_discard,
		       old_tclient->no_buff_discard, estats->mac_discard,
		       estats->mac_filter_discard, estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       old_tclient->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
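
/* Stop path: flush the final port/function statistics back to the
 * port_stx/func_stx shared-memory buffers with one last DMAE chain.
 */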

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}
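
/* Statistics state machine: indexed by [current state][event], each entry
 * names the action to run and the state to move to. Events are PMF change,
 * link up, timer update and stop; states are DISABLED and ENABLED.
 */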

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
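
/* The periodic timer drives both the driver/MCP heartbeat and the
 * STATS_EVENT_UPDATE tick; in 'poll' mode it also services the first
 * fastpath ring in place of interrupts.
 */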

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}
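
/* A non-default status block carries a USTORM and a CSTORM section; for
 * each one the host DMA address is programmed into the storm's internal
 * memory, the owning function is recorded, and every host-coalescing index
 * starts out disabled (the HC_DISABLE writes below).
 */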

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
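
/* Interrupt coalescing: the HC timeout for each ring index is programmed as
 * ticks/12, i.e. the hardware timer appears to count in 12us units, and a
 * zero ticks value simply disables coalescing for that index (HC_DISABLE=1).
 */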

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_BD_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
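
/* Rx ring bring-up: size the Rx buffers for the current MTU, pre-allocate
 * one skb per TPA aggregation bin (falling back to TPA-off on allocation
 * failure), then chain the SGE, BD and CQ "next page" elements and fill the
 * rings themselves.
 */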

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu;
	bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
			   BCM_RX_ETH_PAYLOAD_ALIGN;

	if (bp->flags & TPA_ENABLE_FLAG) {
		DP(NETIF_MSG_IFUP,
		   "rx_buf_size %d  effective_mtu %d\n",
		   bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				bp->eth_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
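
/* The Tx rings only need their "next page" BDs chained here; actual BDs are
 * written at transmit time. Producer/consumer indices start at zero.
 */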

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
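
/* Per-queue Ethernet context: the XSTORM side gets the Tx BD page and
 * doorbell addresses, the USTORM side the Rx BD/SGE pages and buffer sizes,
 * and the CSTORM side the Tx completion index; the CDU reservation values
 * tie each context to its connection ID.
 */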

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_size =
						BCM_RX_ETH_PAYLOAD_ALIGN;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
					(u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
			i % bp->num_queues);

	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}
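
/* Per-client Tstorm configuration: effective MTU, statistics counter ID,
 * optional VLAN stripping, and (with TPA) the maximum number of SGEs a
 * packet may consume, rounded up to a whole number of SGE pages.
 */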

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
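
/* Rx mode is expressed to the Tstorm as per-function accept/drop masks for
 * unicast, multicast and broadcast; 'mask' selects this function's bit.
 */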

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* reset xstorm per client statistics */
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}
	/* reset tstorm per client statistics */
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
}
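
/* The MCP load response tells this function how much of the chip it must
 * initialize: COMMON implies PORT implies FUNCTION, hence the deliberate
 * fall-through between the cases below.
 */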

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      FP_SB_ID(fp));
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
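
/* Note: bnx2x_gunzip() skips the fixed 10-byte gzip header (plus an optional
 * NUL-terminated name when FNAME is set) and feeds zlib_inflate a raw
 * deflate stream, hence the negative window-bits in zlib_inflateInit2().
 */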

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
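
/* The self test below runs in two phases (a single packet, then ten),
 * checking NIG byte counts and PRS packet counts after each burst and
 * resetting BRB/PRS in between.
 */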
4940
4941/* some of the internal memories
4942 * are not directly readable from the driver
4943 * to test them we send debug packets
4944 */
4945static int bnx2x_int_mem_test(struct bnx2x *bp)
4946{
4947 int factor;
4948 int count, i;
4949 u32 val = 0;
4950
ad8d3948 4951 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4952 factor = 120;
ad8d3948
EG
4953 else if (CHIP_REV_IS_EMUL(bp))
4954 factor = 200;
4955 else
a2fbb9ea 4956 factor = 1;
a2fbb9ea
ET
4957
4958 DP(NETIF_MSG_HW, "start part1\n");
4959
4960 /* Disable inputs of parser neighbor blocks */
4961 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4962 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4963 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4964 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4965
4966 /* Write 0 to parser credits for CFC search request */
4967 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4968
4969 /* send Ethernet packet */
4970 bnx2x_lb_pckt(bp);
4971
4972 /* TODO do i reset NIG statistic? */
4973 /* Wait until NIG register shows 1 packet of size 0x10 */
4974 count = 1000 * factor;
4975 while (count) {
34f80b04 4976
a2fbb9ea
ET
4977 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4978 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4979 if (val == 0x10)
4980 break;
4981
4982 msleep(10);
4983 count--;
4984 }
4985 if (val != 0x10) {
4986 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4987 return -1;
4988 }
4989
4990 /* Wait until PRS register shows 1 packet */
4991 count = 1000 * factor;
4992 while (count) {
4993 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4994 if (val == 1)
4995 break;
4996
4997 msleep(10);
4998 count--;
4999 }
5000 if (val != 0x1) {
5001 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5002 return -2;
5003 }
5004
5005 /* Reset and init BRB, PRS */
34f80b04 5006 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5007 msleep(50);
34f80b04 5008 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
5009 msleep(50);
5010 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5011 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5012
5013 DP(NETIF_MSG_HW, "part2\n");
5014
5015 /* Disable inputs of parser neighbor blocks */
5016 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5017 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5018 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5019 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5020
5021 /* Write 0 to parser credits for CFC search request */
5022 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5023
5024 /* send 10 Ethernet packets */
5025 for (i = 0; i < 10; i++)
5026 bnx2x_lb_pckt(bp);
5027
5028 /* Wait until NIG register shows 10 + 1
5029 packets of size 11*0x10 = 0xb0 */
5030 count = 1000 * factor;
5031 while (count) {
34f80b04 5032
5033 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5034 val = *bnx2x_sp(bp, wb_data[0]);
5035 if (val == 0xb0)
5036 break;
5037
5038 msleep(10);
5039 count--;
5040 }
5041 if (val != 0xb0) {
5042 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5043 return -3;
5044 }
5045
5046 /* Wait until PRS register shows 2 packets */
5047 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5048 if (val != 2)
5049 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5050
5051 /* Write 1 to parser credits for CFC search request */
5052 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5053
5054 /* Wait until PRS register shows 3 packets */
5055 msleep(10 * factor);
5057 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5058 if (val != 3)
5059 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5060
5061 /* clear NIG EOP FIFO */
5062 for (i = 0; i < 11; i++)
5063 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5064 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5065 if (val != 1) {
5066 BNX2X_ERR("clear of NIG failed\n");
5067 return -4;
5068 }
5069
5070 /* Reset and init BRB, PRS, NIG */
5071 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5072 msleep(50);
5073 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5074 msleep(50);
5075 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5076 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5077#ifndef BCM_ISCSI
5078 /* set NIC mode */
5079 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5080#endif
5081
5082 /* Enable inputs of parser neighbor blocks */
5083 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5084 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5085 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5086 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5087
5088 DP(NETIF_MSG_HW, "done\n");
5089
5090 return 0; /* OK */
5091}
5092
5093static void enable_blocks_attention(struct bnx2x *bp)
5094{
5095 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5096 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5097 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5098 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5099 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5100 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5101 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5102 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5103 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5104/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5105/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5106 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5107 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5108 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5109/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5110/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5111 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5112 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5113 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5114 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5115/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5116/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5117 if (CHIP_REV_IS_FPGA(bp))
5118 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5119 else
5120 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5121 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5122 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5123 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5124/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5125/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5126 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5127 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5128/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5129 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5130}
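/* Note: in these INT_MASK registers a set bit masks an attention
 * source, so writing 0 unmasks everything for that block; the only
 * exceptions are the commented-out SEM/MISC masks above and the PBF
 * value 0x18, which keeps bits 3 and 4 masked.
 */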
5131
5132
5133static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5134{
a2fbb9ea 5135 u32 val, i;
a2fbb9ea 5136
34f80b04 5137 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5138
5139 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5140 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5141
5142 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5143 if (CHIP_IS_E1H(bp))
5144 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5145
5146 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5147 msleep(30);
5148 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5149
5150 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5151 if (CHIP_IS_E1(bp)) {
5152 /* enable HW interrupt from PXP on USDM overflow
5153 bit 16 on INT_MASK_0 */
5154 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5155 }
a2fbb9ea 5156
5157 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5158 bnx2x_init_pxp(bp);
5159
5160#ifdef __BIG_ENDIAN
5161 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5162 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5163 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5164 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5165 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5166
5167/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5168 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5169 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5170 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5171 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5172#endif
5173
34f80b04 5174 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5175#ifdef BCM_ISCSI
5176 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5177 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5178 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5179#endif
5180
5181 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5182 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5183
5184 /* let the HW do its magic ... */
5185 msleep(100);
5186 /* finish PXP init */
5187 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5188 if (val != 1) {
5189 BNX2X_ERR("PXP2 CFG failed\n");
5190 return -EBUSY;
5191 }
5192 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5193 if (val != 1) {
5194 BNX2X_ERR("PXP2 RD_INIT failed\n");
5195 return -EBUSY;
5196 }
a2fbb9ea 5197
5198 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5199 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5200
34f80b04 5201 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5202
5203 /* clean the DMAE memory */
5204 bp->dmae_ready = 1;
5205 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5206
5207 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5208 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5209 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5210 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5211
5212 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5213 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5214 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5215 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5216
5217 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5218 /* soft reset pulse */
5219 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5220 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5221
5222#ifdef BCM_ISCSI
34f80b04 5223 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5224#endif
a2fbb9ea 5225
5226 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5227 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5228 if (!CHIP_REV_IS_SLOW(bp)) {
5229 /* enable hw interrupt from doorbell Q */
5230 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5231 }
a2fbb9ea 5232
5233 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5234 if (CHIP_REV_IS_SLOW(bp)) {
5235 /* fix for emulation and FPGA for no pause */
5236 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5237 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5238 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5239 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5240 }
a2fbb9ea 5241
34f80b04 5242 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5243 /* set NIC mode */
5244 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5245 if (CHIP_IS_E1H(bp))
5246 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5247
5248 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5249 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5250 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5251 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5252
5253 if (CHIP_IS_E1H(bp)) {
5254 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5255 STORM_INTMEM_SIZE_E1H/2);
5256 bnx2x_init_fill(bp,
5257 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5258 0, STORM_INTMEM_SIZE_E1H/2);
5259 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5260 STORM_INTMEM_SIZE_E1H/2);
5261 bnx2x_init_fill(bp,
5262 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5263 0, STORM_INTMEM_SIZE_E1H/2);
5264 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5265 STORM_INTMEM_SIZE_E1H/2);
5266 bnx2x_init_fill(bp,
5267 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5268 0, STORM_INTMEM_SIZE_E1H/2);
5269 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5270 STORM_INTMEM_SIZE_E1H/2);
5271 bnx2x_init_fill(bp,
5272 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5273 0, STORM_INTMEM_SIZE_E1H/2);
5274 } else { /* E1 */
5275 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5276 STORM_INTMEM_SIZE_E1);
5277 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5278 STORM_INTMEM_SIZE_E1);
5279 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5280 STORM_INTMEM_SIZE_E1);
5281 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5282 STORM_INTMEM_SIZE_E1);
34f80b04 5283 }
a2fbb9ea 5284
5285 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5286 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5287 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5288 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5289
5290 /* sync semi rtc */
5291 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5292 0x80000000);
5293 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5294 0x80000000);
a2fbb9ea 5295
5296 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5297 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5298 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5299
5300 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5301 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5302 REG_WR(bp, i, 0xc0cac01a);
5303 /* TODO: replace with something meaningful */
5304 }
5305 if (CHIP_IS_E1H(bp))
5306 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5307 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5308
5309 if (sizeof(union cdu_context) != 1024)
5310 /* we currently assume that a context is 1024 bytes */
5311 printk(KERN_ALERT PFX "please adjust the size of"
5312 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5313
5314 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5315 val = (4 << 24) + (0 << 12) + 1024;
5316 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5317 if (CHIP_IS_E1(bp)) {
5318 /* !!! fix pxp client credit until excel update */
5319 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5320 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5321 }
a2fbb9ea 5322
5323 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5324 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
a2fbb9ea 5325
5326 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5327 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5328
5329 /* PXPCS COMMON comes here */
5330 /* Reset PCIE errors for debug */
5331 REG_WR(bp, 0x2814, 0xffffffff);
5332 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5333
5334 /* EMAC0 COMMON comes here */
5335 /* EMAC1 COMMON comes here */
5336 /* DBU COMMON comes here */
5337 /* DBG COMMON comes here */
5338
5339 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5340 if (CHIP_IS_E1H(bp)) {
5341 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5342 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5343 }
5344
5345 if (CHIP_REV_IS_SLOW(bp))
5346 msleep(200);
5347
5348 /* finish CFC init */
5349 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5350 if (val != 1) {
5351 BNX2X_ERR("CFC LL_INIT failed\n");
5352 return -EBUSY;
5353 }
5354 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5355 if (val != 1) {
5356 BNX2X_ERR("CFC AC_INIT failed\n");
5357 return -EBUSY;
5358 }
5359 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5360 if (val != 1) {
5361 BNX2X_ERR("CFC CAM_INIT failed\n");
5362 return -EBUSY;
5363 }
5364 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5365
5366 /* read NIG statistic
5367 to see if this is our first up since powerup */
5368 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5369 val = *bnx2x_sp(bp, wb_data[0]);
5370
5371 /* do internal memory self test */
5372 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5373 BNX2X_ERR("internal mem self test failed\n");
5374 return -EBUSY;
5375 }
5376
5377 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5378 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5379 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5380 /* Fan failure is indicated by SPIO 5 */
5381 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5382 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5383
5384 /* set to active low mode */
5385 val = REG_RD(bp, MISC_REG_SPIO_INT);
5386 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5387 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5388 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5389
5390 /* enable interrupt to signal the IGU */
5391 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5392 val |= (1 << MISC_REGISTERS_SPIO_5);
5393 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5394 break;
f1410647 5395
5396 default:
5397 break;
5398 }
f1410647 5399
5400 /* clear PXP2 attentions */
5401 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5402
34f80b04 5403 enable_blocks_attention(bp);
a2fbb9ea 5404
5405 if (!BP_NOMCP(bp)) {
5406 bnx2x_acquire_phy_lock(bp);
5407 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5408 bnx2x_release_phy_lock(bp);
5409 } else
5410 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5411
5412 return 0;
5413}
a2fbb9ea 5414
5415static int bnx2x_init_port(struct bnx2x *bp)
5416{
5417 int port = BP_PORT(bp);
5418 u32 val;
a2fbb9ea 5419
5420 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5421
5422 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5423
5424 /* Port PXP comes here */
5425 /* Port PXP2 comes here */
5426#ifdef BCM_ISCSI
5427 /* Port0 1
5428 * Port1 385 */
5429 i++;
5430 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5431 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5432 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5433 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5434
5435 /* Port0 2
5436 * Port1 386 */
5437 i++;
5438 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5439 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5440 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5441 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5442
5443 /* Port0 3
5444 * Port1 387 */
5445 i++;
5446 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5447 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5448 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5449 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5450#endif
34f80b04 5451 /* Port CMs come here */
5452
5453 /* Port QM comes here */
5454#ifdef BCM_ISCSI
5455 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5456 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5457
5458 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5459 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5460#endif
5461 /* Port DQ comes here */
5462 /* Port BRB1 comes here */
ad8d3948 5463 /* Port PRS comes here */
5464 /* Port TSDM comes here */
5465 /* Port CSDM comes here */
5466 /* Port USDM comes here */
5467 /* Port XSDM comes here */
5468 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5469 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5470 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5471 port ? USEM_PORT1_END : USEM_PORT0_END);
5472 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5473 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5474 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5475 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5476 /* Port UPB comes here */
5477 /* Port XPB comes here */
5478
5479 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5480 port ? PBF_PORT1_END : PBF_PORT0_END);
5481
5482 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5483 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5484
5485 /* update threshold */
34f80b04 5486 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5487 /* update init credit */
34f80b04 5488 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5489
5490 /* probe changes */
34f80b04 5491 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5492 msleep(5);
34f80b04 5493 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5494
5495#ifdef BCM_ISCSI
5496 /* tell the searcher where the T2 table is */
5497 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5498
5499 wb_write[0] = U64_LO(bp->t2_mapping);
5500 wb_write[1] = U64_HI(bp->t2_mapping);
5501 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5502 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5503 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5504 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5505
5506 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5507 /* Port SRCH comes here */
5508#endif
5509 /* Port CDU comes here */
5510 /* Port CFC comes here */
5511
5512 if (CHIP_IS_E1(bp)) {
5513 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5514 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5515 }
5516 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5517 port ? HC_PORT1_END : HC_PORT0_END);
5518
5519 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5520 MISC_AEU_PORT0_START,
5521 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5522 /* init aeu_mask_attn_func_0/1:
5523 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5524 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5525 * bits 4-7 are used for "per vn group attention" */
5526 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5527 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5528
5529 /* Port PXPCS comes here */
5530 /* Port EMAC0 comes here */
5531 /* Port EMAC1 comes here */
5532 /* Port DBU comes here */
5533 /* Port DBG comes here */
5534 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5535 port ? NIG_PORT1_END : NIG_PORT0_END);
5536
5537 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5538
5539 if (CHIP_IS_E1H(bp)) {
5540 u32 wsum;
5541 struct cmng_struct_per_port m_cmng_port;
5542 int vn;
5543
5544 /* 0x2 disable e1hov, 0x1 enable */
5545 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5546 (IS_E1HMF(bp) ? 0x1 : 0x2));
5547
5548 /* Init RATE SHAPING and FAIRNESS contexts.
5549 Initialize as if there is 10G link. */
5550 wsum = bnx2x_calc_vn_wsum(bp);
5551 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5552 if (IS_E1HMF(bp))
5553 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5554 bnx2x_init_vn_minmax(bp, 2*vn + port,
5555 wsum, 10000, &m_cmng_port);
5556 }
5557
5558 /* Port MCP comes here */
5559 /* Port DMAE comes here */
5560
34f80b04 5561 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5562 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5563 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5564 /* add SPIO 5 to group 0 */
5565 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5566 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5567 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5568 break;
5569
5570 default:
5571 break;
5572 }
5573
c18487ee 5574 bnx2x__link_reset(bp);
a2fbb9ea 5575
5576 return 0;
5577}
5578
5579#define ILT_PER_FUNC (768/2)
5580#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5581/* the phys address is shifted right 12 bits and a valid bit (1) is
5582 added to the 53rd bit
5583 then since this is a wide register(TM)
5584 we split it into two 32 bit writes
5585 */
5586#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5587#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5588#define PXP_ONE_ILT(x) (((x) << 10) | x)
5589#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
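/* Worked example (illustrative values, not from the original source):
 * for a DMA address of 0x0000001234567000,
 *	ONCHIP_ADDR1() = 0x01234567	(address bits 12-43)
 *	ONCHIP_ADDR2() = 0x00100000	(valid bit set, bits 44-63 zero)
 * and PXP_ONE_ILT(5) = 0x1405, i.e. first == last == ILT line 5.
 */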
5590
5591#define CNIC_ILT_LINES 0
5592
5593static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5594{
5595 int reg;
5596
5597 if (CHIP_IS_E1H(bp))
5598 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5599 else /* E1 */
5600 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5601
5602 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5603}
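/* Used below by bnx2x_init_func() to map the slowpath context into the
 * function's first ILT line:
 *	bnx2x_ilt_wr(bp, FUNC_ILT_BASE(func), bnx2x_sp_mapping(bp, context));
 * and by bnx2x_reset_func() with addr == 0 to clear a line again.
 */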
5604
5605static int bnx2x_init_func(struct bnx2x *bp)
5606{
5607 int port = BP_PORT(bp);
5608 int func = BP_FUNC(bp);
5609 int i;
5610
5611 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5612
5613 i = FUNC_ILT_BASE(func);
5614
5615 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5616 if (CHIP_IS_E1H(bp)) {
5617 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5618 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5619 } else /* E1 */
5620 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5621 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5622
5623
5624 if (CHIP_IS_E1H(bp)) {
5625 for (i = 0; i < 9; i++)
5626 bnx2x_init_block(bp,
5627 cm_start[func][i], cm_end[func][i]);
5628
5629 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5630 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5631 }
5632
5633 /* HC init per function */
5634 if (CHIP_IS_E1H(bp)) {
5635 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5636
5637 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5638 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5639 }
5640 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5641
5642 if (CHIP_IS_E1H(bp))
5643 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5644
c14423fe 5645 /* Reset PCIE errors for debug */
5646 REG_WR(bp, 0x2114, 0xffffffff);
5647 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5648
5649 return 0;
5650}
5651
5652static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5653{
5654 int i, rc = 0;
a2fbb9ea 5655
5656 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5657 BP_FUNC(bp), load_code);
a2fbb9ea 5658
5659 bp->dmae_ready = 0;
5660 mutex_init(&bp->dmae_mutex);
5661 bnx2x_gunzip_init(bp);
a2fbb9ea 5662
5663 switch (load_code) {
5664 case FW_MSG_CODE_DRV_LOAD_COMMON:
5665 rc = bnx2x_init_common(bp);
5666 if (rc)
5667 goto init_hw_err;
5668 /* no break */
5669
5670 case FW_MSG_CODE_DRV_LOAD_PORT:
5671 bp->dmae_ready = 1;
5672 rc = bnx2x_init_port(bp);
5673 if (rc)
5674 goto init_hw_err;
5675 /* no break */
5676
5677 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5678 bp->dmae_ready = 1;
5679 rc = bnx2x_init_func(bp);
5680 if (rc)
5681 goto init_hw_err;
5682 break;
5683
5684 default:
5685 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5686 break;
5687 }
5688
5689 if (!BP_NOMCP(bp)) {
5690 int func = BP_FUNC(bp);
5691
5692 bp->fw_drv_pulse_wr_seq =
34f80b04 5693 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5694 DRV_PULSE_SEQ_MASK);
5695 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5696 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5697 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5698 } else
5699 bp->func_stx = 0;
a2fbb9ea 5700
5701 /* this needs to be done before gunzip end */
5702 bnx2x_zero_def_sb(bp);
5703 for_each_queue(bp, i)
5704 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5705
5706init_hw_err:
5707 bnx2x_gunzip_end(bp);
5708
5709 return rc;
5710}
5711
c14423fe 5712/* send the MCP a request, block until there is a reply */
5713static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5714{
34f80b04 5715 int func = BP_FUNC(bp);
5716 u32 seq = ++bp->fw_seq;
5717 u32 rc = 0;
5718 u32 cnt = 1;
5719 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5720
34f80b04 5721 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5722 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5723
5724 do {
5725 /* let the FW do its magic ... */
5726 msleep(delay);
a2fbb9ea 5727
19680c48 5728 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5729
5730 /* Give the FW up to 2 seconds (200*10ms) */
5731 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5732
5733 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5734 cnt*delay, rc, seq);
5735
5736 /* is this a reply to our command? */
5737 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5738 rc &= FW_MSG_CODE_MASK;
f1410647 5739
5740 } else {
5741 /* FW BUG! */
5742 BNX2X_ERR("FW failed to respond!\n");
5743 bnx2x_fw_dump(bp);
5744 rc = 0;
5745 }
f1410647 5746
5747 return rc;
5748}
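/* Typical call pattern (as in bnx2x_nic_load()/bnx2x_nic_unload()
 * below):
 *
 *	load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *	if (!load_code) {
 *		BNX2X_ERR("MCP response failure, aborting\n");
 *		return -EBUSY;
 *	}
 *
 * i.e. a return value of 0 doubles as the "no/bad reply" indication.
 */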
5749
5750static void bnx2x_free_mem(struct bnx2x *bp)
5751{
5752
5753#define BNX2X_PCI_FREE(x, y, size) \
5754 do { \
5755 if (x) { \
5756 pci_free_consistent(bp->pdev, size, x, y); \
5757 x = NULL; \
5758 y = 0; \
5759 } \
5760 } while (0)
5761
5762#define BNX2X_FREE(x) \
5763 do { \
5764 if (x) { \
5765 vfree(x); \
5766 x = NULL; \
5767 } \
5768 } while (0)
5769
5770 int i;
5771
5772 /* fastpath */
5773 for_each_queue(bp, i) {
5774
5775 /* Status blocks */
5776 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5777 bnx2x_fp(bp, i, status_blk_mapping),
5778 sizeof(struct host_status_block) +
5779 sizeof(struct eth_tx_db_data));
5780
5781 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5782 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5783 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5784 bnx2x_fp(bp, i, tx_desc_mapping),
5785 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5786
5787 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5788 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5789 bnx2x_fp(bp, i, rx_desc_mapping),
5790 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5791
5792 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5793 bnx2x_fp(bp, i, rx_comp_mapping),
5794 sizeof(struct eth_fast_path_rx_cqe) *
5795 NUM_RCQ_BD);
a2fbb9ea 5796
7a9b2557 5797 /* SGE ring */
32626230 5798 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5799 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5800 bnx2x_fp(bp, i, rx_sge_mapping),
5801 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5802 }
5803 /* end of fastpath */
5804
5805 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5806 sizeof(struct host_def_status_block));
5807
5808 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5809 sizeof(struct bnx2x_slowpath));
5810
5811#ifdef BCM_ISCSI
5812 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5813 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5814 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5815 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5816#endif
7a9b2557 5817 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5818
5819#undef BNX2X_PCI_FREE
5820#undef BNX2X_FREE
5821}
5822
5823static int bnx2x_alloc_mem(struct bnx2x *bp)
5824{
5825
5826#define BNX2X_PCI_ALLOC(x, y, size) \
5827 do { \
5828 x = pci_alloc_consistent(bp->pdev, size, y); \
5829 if (x == NULL) \
5830 goto alloc_mem_err; \
5831 memset(x, 0, size); \
5832 } while (0)
5833
5834#define BNX2X_ALLOC(x, size) \
5835 do { \
5836 x = vmalloc(size); \
5837 if (x == NULL) \
5838 goto alloc_mem_err; \
5839 memset(x, 0, size); \
5840 } while (0)
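/* Both helpers jump to alloc_mem_err on failure; that label calls
 * bnx2x_free_mem(), which is safe on a partially completed pass because
 * the free macros only act on non-NULL pointers and NULL them after
 * freeing.
 */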
5841
5842 int i;
5843
5844 /* fastpath */
5845 for_each_queue(bp, i) {
5846 bnx2x_fp(bp, i, bp) = bp;
5847
5848 /* Status blocks */
5849 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5850 &bnx2x_fp(bp, i, status_blk_mapping),
5851 sizeof(struct host_status_block) +
5852 sizeof(struct eth_tx_db_data));
5853
5854 bnx2x_fp(bp, i, hw_tx_prods) =
5855 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5856
5857 bnx2x_fp(bp, i, tx_prods_mapping) =
5858 bnx2x_fp(bp, i, status_blk_mapping) +
5859 sizeof(struct host_status_block);
5860
5861 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5862 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5863 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5864 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5865 &bnx2x_fp(bp, i, tx_desc_mapping),
5866 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5867
5868 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5869 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5870 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5871 &bnx2x_fp(bp, i, rx_desc_mapping),
5872 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5873
5874 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5875 &bnx2x_fp(bp, i, rx_comp_mapping),
5876 sizeof(struct eth_fast_path_rx_cqe) *
5877 NUM_RCQ_BD);
5878
5879 /* SGE ring */
5880 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5881 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5882 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5883 &bnx2x_fp(bp, i, rx_sge_mapping),
5884 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5885 }
5886 /* end of fastpath */
5887
5888 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5889 sizeof(struct host_def_status_block));
5890
5891 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5892 sizeof(struct bnx2x_slowpath));
5893
5894#ifdef BCM_ISCSI
5895 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5896
5897 /* Initialize T1 */
5898 for (i = 0; i < 64*1024; i += 64) {
5899 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5900 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5901 }
5902
5903 /* allocate searcher T2 table
5904 we allocate 1/4 of alloc num for T2
5905 (which is not entered into the ILT) */
5906 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5907
5908 /* Initialize T2 */
5909 for (i = 0; i < 16*1024; i += 64)
5910 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5911
c14423fe 5912 /* now fixup the last line in the block to point to the next block */
5913 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5914
5915 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5916 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5917
5918 /* QM queues (128*MAX_CONN) */
5919 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5920#endif
5921
5922 /* Slow path ring */
5923 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5924
5925 return 0;
5926
5927alloc_mem_err:
5928 bnx2x_free_mem(bp);
5929 return -ENOMEM;
5930
5931#undef BNX2X_PCI_ALLOC
5932#undef BNX2X_ALLOC
5933}
5934
5935static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5936{
5937 int i;
5938
5939 for_each_queue(bp, i) {
5940 struct bnx2x_fastpath *fp = &bp->fp[i];
5941
5942 u16 bd_cons = fp->tx_bd_cons;
5943 u16 sw_prod = fp->tx_pkt_prod;
5944 u16 sw_cons = fp->tx_pkt_cons;
5945
5946 while (sw_cons != sw_prod) {
5947 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5948 sw_cons++;
5949 }
5950 }
5951}
5952
5953static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5954{
5955 int i, j;
5956
5957 for_each_queue(bp, j) {
5958 struct bnx2x_fastpath *fp = &bp->fp[j];
5959
5960 for (i = 0; i < NUM_RX_BD; i++) {
5961 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5962 struct sk_buff *skb = rx_buf->skb;
5963
5964 if (skb == NULL)
5965 continue;
5966
5967 pci_unmap_single(bp->pdev,
5968 pci_unmap_addr(rx_buf, mapping),
437cf2f1 5969 bp->rx_buf_size,
5970 PCI_DMA_FROMDEVICE);
5971
5972 rx_buf->skb = NULL;
5973 dev_kfree_skb(skb);
5974 }
7a9b2557 5975 if (!fp->disable_tpa)
5976 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5977 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 5978 ETH_MAX_AGGREGATION_QUEUES_E1H);
5979 }
5980}
5981
5982static void bnx2x_free_skbs(struct bnx2x *bp)
5983{
5984 bnx2x_free_tx_skbs(bp);
5985 bnx2x_free_rx_skbs(bp);
5986}
5987
5988static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5989{
34f80b04 5990 int i, offset = 1;
5991
5992 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 5993 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5994 bp->msix_table[0].vector);
5995
5996 for_each_queue(bp, i) {
c14423fe 5997 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 5998 "state %x\n", i, bp->msix_table[i + offset].vector,
5999 bnx2x_fp(bp, i, state));
6000
6001 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6002 BNX2X_ERR("IRQ of fp #%d being freed while "
6003 "state != closed\n", i);
a2fbb9ea 6004
34f80b04 6005 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6006 }
6007}
6008
6009static void bnx2x_free_irq(struct bnx2x *bp)
6010{
a2fbb9ea 6011 if (bp->flags & USING_MSIX_FLAG) {
6012 bnx2x_free_msix_irqs(bp);
6013 pci_disable_msix(bp->pdev);
6014 bp->flags &= ~USING_MSIX_FLAG;
6015
6016 } else
6017 free_irq(bp->pdev->irq, bp->dev);
6018}
6019
6020static int bnx2x_enable_msix(struct bnx2x *bp)
6021{
34f80b04 6022 int i, rc, offset;
6023
6024 bp->msix_table[0].entry = 0;
6025 offset = 1;
6026 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 6027
6028 for_each_queue(bp, i) {
6029 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 6030
6031 bp->msix_table[i + offset].entry = igu_vec;
6032 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6033 "(fastpath #%u)\n", i + offset, igu_vec, i);
6034 }
6035
6036 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6037 bp->num_queues + offset);
6038 if (rc) {
6039 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6040 return -1;
6041 }
6042 bp->flags |= USING_MSIX_FLAG;
6043
6044 return 0;
6045}
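/* Resulting vector layout: msix_table[0] is the slowpath interrupt and
 * msix_table[i + 1] carries fastpath ring i on IGU vector
 * BP_L_ID(bp) + 1 + i.
 */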
6046
6047static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6048{
34f80b04 6049 int i, rc, offset = 1;
a2fbb9ea 6050
6051 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6052 bp->dev->name, bp->dev);
6053 if (rc) {
6054 BNX2X_ERR("request sp irq failed\n");
6055 return -EBUSY;
6056 }
6057
6058 for_each_queue(bp, i) {
34f80b04 6059 rc = request_irq(bp->msix_table[i + offset].vector,
6060 bnx2x_msix_fp_int, 0,
6061 bp->dev->name, &bp->fp[i]);
a2fbb9ea 6062 if (rc) {
6063 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6064 i + offset, -rc);
6065 bnx2x_free_msix_irqs(bp);
6066 return -EBUSY;
6067 }
6068
6069 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6070 }
6071
6072 return 0;
6073}
6074
6075static int bnx2x_req_irq(struct bnx2x *bp)
6076{
34f80b04 6077 int rc;
a2fbb9ea 6078
6079 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6080 bp->dev->name, bp->dev);
6081 if (!rc)
6082 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6083
6084 return rc;
6085}
6086
6087static void bnx2x_napi_enable(struct bnx2x *bp)
6088{
6089 int i;
6090
6091 for_each_queue(bp, i)
6092 napi_enable(&bnx2x_fp(bp, i, napi));
6093}
6094
6095static void bnx2x_napi_disable(struct bnx2x *bp)
6096{
6097 int i;
6098
6099 for_each_queue(bp, i)
6100 napi_disable(&bnx2x_fp(bp, i, napi));
6101}
6102
6103static void bnx2x_netif_start(struct bnx2x *bp)
6104{
6105 if (atomic_dec_and_test(&bp->intr_sem)) {
6106 if (netif_running(bp->dev)) {
6107 if (bp->state == BNX2X_STATE_OPEN)
6108 netif_wake_queue(bp->dev);
6109 bnx2x_napi_enable(bp);
6110 bnx2x_int_enable(bp);
6111 }
6112 }
6113}
6114
f8ef6e44 6115static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6116{
f8ef6e44 6117 bnx2x_int_disable_sync(bp, disable_hw);
6118 if (netif_running(bp->dev)) {
6119 bnx2x_napi_disable(bp);
6120 netif_tx_disable(bp->dev);
6121 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6122 }
6123}
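/* A note on the pairing, assuming bnx2x_int_disable_sync() raises
 * bp->intr_sem (see its definition earlier in this file): stop/start
 * nest, and only the bnx2x_netif_start() call that brings the semaphore
 * back to zero (atomic_dec_and_test() above) re-enables NAPI and
 * interrupts.
 */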
6124
6125/*
6126 * Init service functions
6127 */
6128
3101c2bc 6129static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6130{
6131 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6132 int port = BP_PORT(bp);
6133
6134 /* CAM allocation
6135 * unicasts 0-31:port0 32-63:port1
6136 * multicast 64-127:port0 128-191:port1
6137 */
6138 config->hdr.length_6b = 2;
6139 config->hdr.offset = port ? 31 : 0;
6140 config->hdr.client_id = BP_CL_ID(bp);
6141 config->hdr.reserved1 = 0;
6142
6143 /* primary MAC */
6144 config->config_table[0].cam_entry.msb_mac_addr =
6145 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6146 config->config_table[0].cam_entry.middle_mac_addr =
6147 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6148 config->config_table[0].cam_entry.lsb_mac_addr =
6149 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6150 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6151 if (set)
6152 config->config_table[0].target_table_entry.flags = 0;
6153 else
6154 CAM_INVALIDATE(config->config_table[0]);
6155 config->config_table[0].target_table_entry.client_id = 0;
6156 config->config_table[0].target_table_entry.vlan_id = 0;
6157
6158 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6159 (set ? "setting" : "clearing"),
6160 config->config_table[0].cam_entry.msb_mac_addr,
6161 config->config_table[0].cam_entry.middle_mac_addr,
6162 config->config_table[0].cam_entry.lsb_mac_addr);
6163
6164 /* broadcast */
6165 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6166 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6167 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6168 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6169 if (set)
6170 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6171 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6172 else
6173 CAM_INVALIDATE(config->config_table[1]);
6174 config->config_table[1].target_table_entry.client_id = 0;
6175 config->config_table[1].target_table_entry.vlan_id = 0;
6176
6177 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6178 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6179 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6180}
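/* Worked example (illustrative, little-endian host): for a dev_addr of
 * 00:11:22:33:44:55 the swab16() loads above yield msb = 0x0011,
 * middle = 0x2233, lsb = 0x4455, i.e. the CAM entry holds the address
 * as big-endian 16-bit words.
 */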
6181
3101c2bc 6182static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6183{
6184 struct mac_configuration_cmd_e1h *config =
6185 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6186
3101c2bc 6187 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6188 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6189 return;
6190 }
6191
6192 /* CAM allocation for E1H
6193 * unicasts: by func number
6194 * multicast: 20+FUNC*20, 20 each
6195 */
6196 config->hdr.length_6b = 1;
6197 config->hdr.offset = BP_FUNC(bp);
6198 config->hdr.client_id = BP_CL_ID(bp);
6199 config->hdr.reserved1 = 0;
6200
6201 /* primary MAC */
6202 config->config_table[0].msb_mac_addr =
6203 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6204 config->config_table[0].middle_mac_addr =
6205 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6206 config->config_table[0].lsb_mac_addr =
6207 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6208 config->config_table[0].client_id = BP_L_ID(bp);
6209 config->config_table[0].vlan_id = 0;
6210 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6211 if (set)
6212 config->config_table[0].flags = BP_PORT(bp);
6213 else
6214 config->config_table[0].flags =
6215 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6216
6217 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6218 (set ? "setting" : "clearing"),
6219 config->config_table[0].msb_mac_addr,
6220 config->config_table[0].middle_mac_addr,
6221 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6222
6223 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6224 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6225 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6226}
6227
6228static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6229 int *state_p, int poll)
6230{
6231 /* can take a while if any port is running */
34f80b04 6232 int cnt = 500;
a2fbb9ea 6233
6234 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6235 poll ? "polling" : "waiting", state, idx);
6236
6237 might_sleep();
34f80b04 6238 while (cnt--) {
6239 if (poll) {
6240 bnx2x_rx_int(bp->fp, 10);
6241 /* if index is different from 0
6242 * the reply for some commands will
3101c2bc 6243 * be on the non default queue
6244 */
6245 if (idx)
6246 bnx2x_rx_int(&bp->fp[idx], 10);
6247 }
a2fbb9ea 6248
3101c2bc 6249 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6250 if (*state_p == state)
6251 return 0;
6252
a2fbb9ea 6253 msleep(1);
6254 }
6255
a2fbb9ea 6256 /* timeout! */
6257 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6258 poll ? "polling" : "waiting", state, idx);
6259#ifdef BNX2X_STOP_ON_ERROR
6260 bnx2x_panic();
6261#endif
a2fbb9ea 6262
49d66772 6263 return -EBUSY;
6264}
6265
6266static int bnx2x_setup_leading(struct bnx2x *bp)
6267{
34f80b04 6268 int rc;
a2fbb9ea 6269
c14423fe 6270 /* reset IGU state */
34f80b04 6271 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6272
6273 /* SETUP ramrod */
6274 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6275
6276 /* Wait for completion */
6277 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6278
34f80b04 6279 return rc;
6280}
6281
6282static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6283{
a2fbb9ea 6284 /* reset IGU state */
34f80b04 6285 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6286
228241eb 6287 /* SETUP ramrod */
6288 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6289 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6290
6291 /* Wait for completion */
6292 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 6293 &(bp->fp[index].state), 0);
6294}
6295
6296static int bnx2x_poll(struct napi_struct *napi, int budget);
6297static void bnx2x_set_rx_mode(struct net_device *dev);
6298
6299/* must be called with rtnl_lock */
6300static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 6301{
228241eb 6302 u32 load_code;
34f80b04 6303 int i, rc;
6304#ifdef BNX2X_STOP_ON_ERROR
6305 if (unlikely(bp->panic))
6306 return -EPERM;
6307#endif
6308
6309 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6310
6311 /* Send LOAD_REQUEST command to MCP
6312 Returns the type of LOAD command:
6313 if it is the first port to be initialized
6314 common blocks should be initialized, otherwise - not
a2fbb9ea 6315 */
34f80b04 6316 if (!BP_NOMCP(bp)) {
6317 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6318 if (!load_code) {
da5a662a 6319 BNX2X_ERR("MCP response failure, aborting\n");
6320 return -EBUSY;
6321 }
34f80b04 6322 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
a2fbb9ea 6323 return -EBUSY; /* other port in diagnostic mode */
34f80b04 6324
a2fbb9ea 6325 } else {
6326 int port = BP_PORT(bp);
6327
6328 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6329 load_count[0], load_count[1], load_count[2]);
6330 load_count[0]++;
da5a662a 6331 load_count[1 + port]++;
6332 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6333 load_count[0], load_count[1], load_count[2]);
6334 if (load_count[0] == 1)
6335 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
da5a662a 6336 else if (load_count[1 + port] == 1)
6337 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6338 else
6339 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6340 }
6341
6342 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6343 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6344 bp->port.pmf = 1;
6345 else
6346 bp->port.pmf = 0;
6347 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6348
6349 /* if we can't use MSI-X we only need one fp,
6350 * so try to enable MSI-X with the requested number of fp's
6351 * and fallback to inta with one fp
6352 */
6353 if (use_inta) {
6354 bp->num_queues = 1;
6355
6356 } else {
6357 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6358 /* user requested number */
6359 bp->num_queues = use_multi;
6360
6361 else if (use_multi)
6362 bp->num_queues = min_t(u32, num_online_cpus(),
6363 BP_MAX_QUEUES(bp));
6364 else
a2fbb9ea 6365 bp->num_queues = 1;
6366
6367 if (bnx2x_enable_msix(bp)) {
6368 /* failed to enable MSI-X */
6369 bp->num_queues = 1;
6370 if (use_multi)
6371 BNX2X_ERR("Multi requested but failed"
6372 " to enable MSI-X\n");
6373 }
6374 }
6375 DP(NETIF_MSG_IFUP,
6376 "set number of queues to %d\n", bp->num_queues);
c14423fe 6377
6378 if (bnx2x_alloc_mem(bp))
6379 return -ENOMEM;
6380
6381 for_each_queue(bp, i)
6382 bnx2x_fp(bp, i, disable_tpa) =
6383 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6384
6385 if (bp->flags & USING_MSIX_FLAG) {
6386 rc = bnx2x_req_msix_irqs(bp);
6387 if (rc) {
6388 pci_disable_msix(bp->pdev);
6389 goto load_error;
6390 }
6391 } else {
6392 bnx2x_ack_int(bp);
6393 rc = bnx2x_req_irq(bp);
6394 if (rc) {
6395 BNX2X_ERR("IRQ request failed, aborting\n");
6396 goto load_error;
6397 }
6398 }
6399
6400 for_each_queue(bp, i)
6401 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6402 bnx2x_poll, 128);
6403
a2fbb9ea 6404 /* Initialize HW */
6405 rc = bnx2x_init_hw(bp, load_code);
6406 if (rc) {
a2fbb9ea 6407 BNX2X_ERR("HW init failed, aborting\n");
d1014634 6408 goto load_int_disable;
6409 }
6410
a2fbb9ea 6411 /* Setup NIC internals and enable interrupts */
471de716 6412 bnx2x_nic_init(bp, load_code);
6413
6414 /* Send LOAD_DONE command to MCP */
34f80b04 6415 if (!BP_NOMCP(bp)) {
6416 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6417 if (!load_code) {
da5a662a 6418 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6419 rc = -EBUSY;
d1014634 6420 goto load_rings_free;
6421 }
6422 }
6423
6424 bnx2x_stats_init(bp);
6425
6426 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6427
6428 /* Enable Rx interrupt handling before sending the ramrod
6429 as it's completed on Rx FP queue */
65abd74d 6430 bnx2x_napi_enable(bp);
a2fbb9ea 6431
6432 /* Enable interrupt handling */
6433 atomic_set(&bp->intr_sem, 0);
6434
6435 rc = bnx2x_setup_leading(bp);
6436 if (rc) {
da5a662a 6437 BNX2X_ERR("Setup leading failed!\n");
d1014634 6438 goto load_netif_stop;
34f80b04 6439 }
a2fbb9ea 6440
6441 if (CHIP_IS_E1H(bp))
6442 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6443 BNX2X_ERR("!!! mf_cfg function disabled\n");
6444 bp->state = BNX2X_STATE_DISABLED;
6445 }
a2fbb9ea 6446
6447 if (bp->state == BNX2X_STATE_OPEN)
6448 for_each_nondefault_queue(bp, i) {
6449 rc = bnx2x_setup_multi(bp, i);
6450 if (rc)
d1014634 6451 goto load_netif_stop;
34f80b04 6452 }
a2fbb9ea 6453
34f80b04 6454 if (CHIP_IS_E1(bp))
3101c2bc 6455 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6456 else
3101c2bc 6457 bnx2x_set_mac_addr_e1h(bp, 1);
6458
6459 if (bp->port.pmf)
6460 bnx2x_initial_phy_init(bp);
6461
6462 /* Start fast path */
6463 switch (load_mode) {
6464 case LOAD_NORMAL:
6465 /* Tx queue should be only reenabled */
6466 netif_wake_queue(bp->dev);
6467 bnx2x_set_rx_mode(bp->dev);
6468 break;
6469
6470 case LOAD_OPEN:
a2fbb9ea 6471 netif_start_queue(bp->dev);
34f80b04 6472 bnx2x_set_rx_mode(bp->dev);
6473 if (bp->flags & USING_MSIX_FLAG)
6474 printk(KERN_INFO PFX "%s: using MSI-X\n",
6475 bp->dev->name);
34f80b04 6476 break;
a2fbb9ea 6477
34f80b04 6478 case LOAD_DIAG:
a2fbb9ea 6479 bnx2x_set_rx_mode(bp->dev);
6480 bp->state = BNX2X_STATE_DIAG;
6481 break;
6482
6483 default:
6484 break;
6485 }
6486
6487 if (!bp->port.pmf)
6488 bnx2x__link_status_update(bp);
6489
6490 /* start the timer */
6491 mod_timer(&bp->timer, jiffies + bp->current_interval);
6492
34f80b04 6493
6494 return 0;
6495
d1014634 6496load_netif_stop:
65abd74d 6497 bnx2x_napi_disable(bp);
d1014634 6498load_rings_free:
6499 /* Free SKBs, SGEs, TPA pool and driver internals */
6500 bnx2x_free_skbs(bp);
6501 for_each_queue(bp, i)
3196a88a 6502 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d1014634 6503load_int_disable:
f8ef6e44 6504 bnx2x_int_disable_sync(bp, 1);
6505 /* Release IRQs */
6506 bnx2x_free_irq(bp);
228241eb 6507load_error:
a2fbb9ea 6508 bnx2x_free_mem(bp);
9a035440 6509 bp->port.pmf = 0;
6510
6511 /* TBD we really need to reset the chip
6512 if we want to recover from this */
34f80b04 6513 return rc;
6514}
6515
6516static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6517{
6518 int rc;
6519
c14423fe 6520 /* halt the connection */
a2fbb9ea 6521 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
231fd58a 6522 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
a2fbb9ea 6523
34f80b04 6524 /* Wait for completion */
a2fbb9ea 6525 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 6526 &(bp->fp[index].state), 1);
c14423fe 6527 if (rc) /* timeout */
6528 return rc;
6529
6530 /* delete cfc entry */
6531 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6532
6533 /* Wait for completion */
6534 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6535 &(bp->fp[index].state), 1);
6536 return rc;
6537}
6538
da5a662a 6539static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6540{
49d66772 6541 u16 dsb_sp_prod_idx;
c14423fe 6542 /* if the other port is handling traffic,
a2fbb9ea 6543 this can take a lot of time */
6544 int cnt = 500;
6545 int rc;
6546
6547 might_sleep();
6548
6549 /* Send HALT ramrod */
6550 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6551 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6552
6553 /* Wait for completion */
6554 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6555 &(bp->fp[0].state), 1);
6556 if (rc) /* timeout */
da5a662a 6557 return rc;
a2fbb9ea 6558
49d66772 6559 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6560
228241eb 6561 /* Send PORT_DELETE ramrod */
6562 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6563
49d66772 6564 /* Wait for completion to arrive on default status block
6565 we are going to reset the chip anyway
6566 so there is not much to do if this times out
6567 */
34f80b04 6568 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6569 if (!cnt) {
6570 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6571 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6572 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6573#ifdef BNX2X_STOP_ON_ERROR
6574 bnx2x_panic();
6575#else
6576 rc = -EBUSY;
6577#endif
6578 break;
6579 }
6580 cnt--;
da5a662a 6581 msleep(1);
6582 }
6583 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6584 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6585
6586 return rc;
6587}
6588
6589static void bnx2x_reset_func(struct bnx2x *bp)
6590{
6591 int port = BP_PORT(bp);
6592 int func = BP_FUNC(bp);
6593 int base, i;
6594
6595 /* Configure IGU */
6596 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6597 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6598
6599 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6600
6601 /* Clear ILT */
6602 base = FUNC_ILT_BASE(func);
6603 for (i = base; i < base + ILT_PER_FUNC; i++)
6604 bnx2x_ilt_wr(bp, i, 0);
6605}
6606
6607static void bnx2x_reset_port(struct bnx2x *bp)
6608{
6609 int port = BP_PORT(bp);
6610 u32 val;
6611
6612 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6613
6614 /* Do not rcv packets to BRB */
6615 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6616 /* Do not direct rcv packets that are not for MCP to the BRB */
6617 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6618 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6619
6620 /* Configure AEU */
6621 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6622
6623 msleep(100);
6624 /* Check for BRB port occupancy */
6625 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6626 if (val)
6627 DP(NETIF_MSG_IFDOWN,
33471629 6628 "BRB1 is not empty %d blocks are occupied\n", val);
6629
6630 /* TODO: Close Doorbell port? */
6631}
6632
6633static void bnx2x_reset_common(struct bnx2x *bp)
6634{
6635 /* reset_common */
6636 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6637 0xd3ffff7f);
6638 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6639}
6640
6641static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6642{
6643 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6644 BP_FUNC(bp), reset_code);
6645
6646 switch (reset_code) {
6647 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6648 bnx2x_reset_port(bp);
6649 bnx2x_reset_func(bp);
6650 bnx2x_reset_common(bp);
6651 break;
6652
6653 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6654 bnx2x_reset_port(bp);
6655 bnx2x_reset_func(bp);
6656 break;
6657
6658 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6659 bnx2x_reset_func(bp);
6660 break;
49d66772 6661
6662 default:
6663 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6664 break;
6665 }
6666}
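/* The cases deliberately cascade in scope: a COMMON unload resets port,
 * function and common blocks; a PORT unload resets port and function;
 * a FUNCTION unload resets only the function itself.
 */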
6667
33471629 6668/* must be called with rtnl_lock */
34f80b04 6669static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6670{
da5a662a 6671 int port = BP_PORT(bp);
a2fbb9ea 6672 u32 reset_code = 0;
da5a662a 6673 int i, cnt, rc;
6674
6675 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6676
6677 bp->rx_mode = BNX2X_RX_MODE_NONE;
6678 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6679
f8ef6e44 6680 bnx2x_netif_stop(bp, 1);
6681 if (!netif_running(bp->dev))
6682 bnx2x_napi_disable(bp);
6683 del_timer_sync(&bp->timer);
6684 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6685 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6686 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6687
da5a662a 6688 /* Wait until tx fast path tasks complete */
6689 for_each_queue(bp, i) {
6690 struct bnx2x_fastpath *fp = &bp->fp[i];
6691
6692 cnt = 1000;
6693 smp_rmb();
6694 while (BNX2X_HAS_TX_WORK(fp)) {
6695
65abd74d 6696 bnx2x_tx_int(fp, 1000);
6697 if (!cnt) {
6698 BNX2X_ERR("timeout waiting for queue[%d]\n",
6699 i);
6700#ifdef BNX2X_STOP_ON_ERROR
6701 bnx2x_panic();
6702 return -EBUSY;
6703#else
6704 break;
6705#endif
6706 }
6707 cnt--;
da5a662a 6708 msleep(1);
6709 smp_rmb();
6710 }
228241eb 6711 }
6712 /* Give HW time to discard old tx messages */
6713 msleep(1);
a2fbb9ea 6714
6715 /* Release IRQs */
6716 bnx2x_free_irq(bp);
6717
3101c2bc
YG
6718 if (CHIP_IS_E1(bp)) {
6719 struct mac_configuration_cmd *config =
6720 bnx2x_sp(bp, mcast_config);
6721
6722 bnx2x_set_mac_addr_e1(bp, 0);
6723
6724 for (i = 0; i < config->hdr.length_6b; i++)
6725 CAM_INVALIDATE(config->config_table[i]);
6726
6727 config->hdr.length_6b = i;
6728 if (CHIP_REV_IS_SLOW(bp))
6729 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6730 else
6731 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6732 config->hdr.client_id = BP_CL_ID(bp);
6733 config->hdr.reserved1 = 0;
6734
6735 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6736 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6737 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6738
6739 } else { /* E1H */
6740 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6741
6742 bnx2x_set_mac_addr_e1h(bp, 0);
6743
6744 for (i = 0; i < MC_HASH_SIZE; i++)
6745 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6746 }
6747
6748 if (unload_mode == UNLOAD_NORMAL)
6749 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6750
6751 else if (bp->flags & NO_WOL_FLAG) {
6752 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6753 if (CHIP_IS_E1H(bp))
6754 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6755
6756 } else if (bp->wol) {
6757 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6758 u8 *mac_addr = bp->dev->dev_addr;
6759 u32 val;
 6760			/* The MAC address is written to entries 1-4 to preserve
 6761			   entry 0, which is used by the PMF (worked example below) */
6762 u8 entry = (BP_E1HVN(bp) + 1)*8;
6763
6764 val = (mac_addr[0] << 8) | mac_addr[1];
6765 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6766
6767 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6768 (mac_addr[4] << 8) | mac_addr[5];
6769 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6770
6771 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6772
6773 } else
6774 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
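	/* Worked example (illustrative): for MAC 00:10:18:aa:bb:cc the
	 * two writes above program
	 *	MAC_MATCH + entry:     (0x00 << 8) | 0x10 = 0x00000010
	 *	MAC_MATCH + entry + 4: (0x18 << 24) | (0xaa << 16) |
	 *			       (0xbb << 8) | 0xcc = 0x18aabbcc
	 */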
da5a662a 6775
34f80b04
EG
 6776	/* Close multi and leading connections;
 6777	   completions for ramrods are collected synchronously */
a2fbb9ea
ET
6778 for_each_nondefault_queue(bp, i)
6779 if (bnx2x_stop_multi(bp, i))
228241eb 6780 goto unload_error;
a2fbb9ea 6781
da5a662a
VZ
6782 rc = bnx2x_stop_leading(bp);
6783 if (rc) {
34f80b04 6784 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6785#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6786 return -EBUSY;
da5a662a
VZ
6787#else
6788 goto unload_error;
34f80b04 6789#endif
228241eb
ET
6790 }
6791
6792unload_error:
34f80b04 6793 if (!BP_NOMCP(bp))
228241eb 6794 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6795 else {
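		/* No MCP: arbitrate locally.  load_count[0] counts
		 * functions on the whole chip, load_count[1 + port]
		 * per port; the first count to reach zero widens the
		 * reset scope from FUNCTION to PORT to COMMON.
		 */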
6796 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6797 load_count[0], load_count[1], load_count[2]);
6798 load_count[0]--;
da5a662a 6799 load_count[1 + port]--;
34f80b04
EG
6800 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6801 load_count[0], load_count[1], load_count[2]);
6802 if (load_count[0] == 0)
6803 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6804 else if (load_count[1 + port] == 0)
34f80b04
EG
6805 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6806 else
6807 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6808 }
a2fbb9ea 6809
34f80b04
EG
6810 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6811 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6812 bnx2x__link_reset(bp);
a2fbb9ea
ET
6813
6814 /* Reset the chip */
228241eb 6815 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
6816
6817 /* Report UNLOAD_DONE to MCP */
34f80b04 6818 if (!BP_NOMCP(bp))
a2fbb9ea 6819 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9a035440 6820 bp->port.pmf = 0;
a2fbb9ea 6821
7a9b2557 6822 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6823 bnx2x_free_skbs(bp);
7a9b2557 6824 for_each_queue(bp, i)
3196a88a 6825 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
a2fbb9ea
ET
6826 bnx2x_free_mem(bp);
6827
6828 bp->state = BNX2X_STATE_CLOSED;
228241eb 6829
a2fbb9ea
ET
6830 netif_carrier_off(bp->dev);
6831
6832 return 0;
6833}
6834
34f80b04
EG
6835static void bnx2x_reset_task(struct work_struct *work)
6836{
6837 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6838
6839#ifdef BNX2X_STOP_ON_ERROR
 6840	BNX2X_ERR("reset task called but STOP_ON_ERROR is defined,"
 6841		  " so the reset is skipped to allow a debug dump;\n"
 6842	KERN_ERR  " you will need to reboot when done\n");
6843 return;
6844#endif
6845
6846 rtnl_lock();
6847
6848 if (!netif_running(bp->dev))
6849 goto reset_task_exit;
6850
6851 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6852 bnx2x_nic_load(bp, LOAD_NORMAL);
6853
6854reset_task_exit:
6855 rtnl_unlock();
6856}
6857
a2fbb9ea
ET
6858/* end of nic load/unload */
6859
6860/* ethtool_ops */
6861
6862/*
6863 * Init service functions
6864 */
6865
34f80b04
EG
6866static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6867{
6868 u32 val;
6869
6870 /* Check if there is any driver already loaded */
6871 val = REG_RD(bp, MISC_REG_UNPREPARED);
6872 if (val == 0x1) {
 6873		/* Check if it is the UNDI driver:
 6874		 * the UNDI driver initializes the CID offset for the
 6875		 * normal doorbell to 0x7 */
4a37fb66 6876 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04 6877 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
76b190c5
EG
6878 if (val == 0x7)
6879 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6880 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6881
34f80b04
EG
6882 if (val == 0x7) {
6883 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6884 /* save our func */
34f80b04 6885 int func = BP_FUNC(bp);
da5a662a
VZ
6886 u32 swap_en;
6887 u32 swap_val;
34f80b04
EG
6888
6889 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6890
6891 /* try unload UNDI on port 0 */
6892 bp->func = 0;
da5a662a
VZ
6893 bp->fw_seq =
6894 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6895 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6896 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6897
6898 /* if UNDI is loaded on the other port */
6899 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6900
da5a662a
VZ
6901 /* send "DONE" for previous unload */
6902 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6903
6904 /* unload UNDI on port 1 */
34f80b04 6905 bp->func = 1;
da5a662a
VZ
6906 bp->fw_seq =
6907 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6908 DRV_MSG_SEQ_NUMBER_MASK);
6909 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6910
6911 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6912 }
6913
da5a662a
VZ
6914 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6915 HC_REG_CONFIG_0), 0x1000);
6916
6917 /* close input traffic and wait for it */
6918 /* Do not rcv packets to BRB */
6919 REG_WR(bp,
6920 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6921 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6922 /* Do not direct rcv packets that are not for MCP to
6923 * the BRB */
6924 REG_WR(bp,
6925 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6926 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6927 /* clear AEU */
6928 REG_WR(bp,
6929 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6930 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6931 msleep(10);
6932
6933 /* save NIG port swap info */
6934 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6935 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
6936 /* reset device */
6937 REG_WR(bp,
6938 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 6939 0xd3ffffff);
34f80b04
EG
6940 REG_WR(bp,
6941 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6942 0x1403);
da5a662a
VZ
6943 /* take the NIG out of reset and restore swap values */
6944 REG_WR(bp,
6945 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6946 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6947 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6948 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6949
6950 /* send unload done to the MCP */
6951 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6952
6953 /* restore our func and fw_seq */
6954 bp->func = func;
6955 bp->fw_seq =
6956 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6957 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04
EG
6958 }
6959 }
6960}
6961
6962static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6963{
6964 u32 val, val2, val3, val4, id;
72ce58c3 6965 u16 pmc;
34f80b04
EG
6966
6967 /* Get the chip revision id and number. */
6968 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6969 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6970 id = ((val & 0xffff) << 16);
6971 val = REG_RD(bp, MISC_REG_CHIP_REV);
6972 id |= ((val & 0xf) << 12);
6973 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6974 id |= ((val & 0xff) << 4);
 6975	val = REG_RD(bp, MISC_REG_BOND_ID);
6976 id |= (val & 0xf);
6977 bp->common.chip_id = id;
6978 bp->link_params.chip_id = bp->common.chip_id;
6979 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
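	/* Illustrative sketch (not part of the driver): reading the
	 * fields back out of the chip_id assembled above, per the
	 * layout comment (num:16-31, rev:12-15, metal:4-11, bond:0-3).
	 *
	 *	u16 num   = (id >> 16) & 0xffff;
	 *	u8  rev   = (id >> 12) & 0xf;
	 *	u8  metal = (id >> 4) & 0xff;
	 *	u8  bond  = id & 0xf;
	 */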
6980
6981 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6982 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6983 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6984 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6985 bp->common.flash_size, bp->common.flash_size);
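	/* Worked example (illustrative; assumes NVRAM_1MB_SIZE is one
	 * megabit, i.e. 0x20000 bytes): a CFG4 size field of 2 yields
	 * 0x20000 << 2 = 0x80000, i.e. a 512KB (4Mbit) flash.
	 */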
6986
6987 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6988 bp->link_params.shmem_base = bp->common.shmem_base;
6989 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6990
6991 if (!bp->common.shmem_base ||
6992 (bp->common.shmem_base < 0xA0000) ||
6993 (bp->common.shmem_base >= 0xC0000)) {
6994 BNX2X_DEV_INFO("MCP not active\n");
6995 bp->flags |= NO_MCP_FLAG;
6996 return;
6997 }
6998
6999 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7000 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7001 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7002 BNX2X_ERR("BAD MCP validity signature\n");
7003
7004 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7005 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7006
7007 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
7008 bp->common.hw_config, bp->common.board);
7009
7010 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7011 SHARED_HW_CFG_LED_MODE_MASK) >>
7012 SHARED_HW_CFG_LED_MODE_SHIFT);
7013
7014 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7015 bp->common.bc_ver = val;
7016 BNX2X_DEV_INFO("bc_ver %X\n", val);
7017 if (val < BNX2X_BC_VER) {
 7018		/* for now, only warn;
 7019		 * later we might need to enforce this */
7020 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7021 " please upgrade BC\n", BNX2X_BC_VER, val);
7022 }
72ce58c3
EG
7023
7024 if (BP_E1HVN(bp) == 0) {
7025 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7026 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7027 } else {
7028 /* no WOL capability for E1HVN != 0 */
7029 bp->flags |= NO_WOL_FLAG;
7030 }
7031 BNX2X_DEV_INFO("%sWoL capable\n",
7032 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
34f80b04
EG
7033
7034 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7035 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7036 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7037 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7038
7039 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7040 val, val2, val3, val4);
7041}
7042
7043static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7044 u32 switch_cfg)
a2fbb9ea 7045{
34f80b04 7046 int port = BP_PORT(bp);
a2fbb9ea
ET
7047 u32 ext_phy_type;
7048
a2fbb9ea
ET
7049 switch (switch_cfg) {
7050 case SWITCH_CFG_1G:
7051 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7052
c18487ee
YR
7053 ext_phy_type =
7054 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7055 switch (ext_phy_type) {
7056 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7057 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7058 ext_phy_type);
7059
34f80b04
EG
7060 bp->port.supported |= (SUPPORTED_10baseT_Half |
7061 SUPPORTED_10baseT_Full |
7062 SUPPORTED_100baseT_Half |
7063 SUPPORTED_100baseT_Full |
7064 SUPPORTED_1000baseT_Full |
7065 SUPPORTED_2500baseX_Full |
7066 SUPPORTED_TP |
7067 SUPPORTED_FIBRE |
7068 SUPPORTED_Autoneg |
7069 SUPPORTED_Pause |
7070 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7071 break;
7072
7073 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7074 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7075 ext_phy_type);
7076
34f80b04
EG
7077 bp->port.supported |= (SUPPORTED_10baseT_Half |
7078 SUPPORTED_10baseT_Full |
7079 SUPPORTED_100baseT_Half |
7080 SUPPORTED_100baseT_Full |
7081 SUPPORTED_1000baseT_Full |
7082 SUPPORTED_TP |
7083 SUPPORTED_FIBRE |
7084 SUPPORTED_Autoneg |
7085 SUPPORTED_Pause |
7086 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7087 break;
7088
7089 default:
7090 BNX2X_ERR("NVRAM config error. "
7091 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7092 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7093 return;
7094 }
7095
34f80b04
EG
7096 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7097 port*0x10);
7098 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7099 break;
7100
7101 case SWITCH_CFG_10G:
7102 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7103
c18487ee
YR
7104 ext_phy_type =
7105 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7106 switch (ext_phy_type) {
7107 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7108 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7109 ext_phy_type);
7110
34f80b04
EG
7111 bp->port.supported |= (SUPPORTED_10baseT_Half |
7112 SUPPORTED_10baseT_Full |
7113 SUPPORTED_100baseT_Half |
7114 SUPPORTED_100baseT_Full |
7115 SUPPORTED_1000baseT_Full |
7116 SUPPORTED_2500baseX_Full |
7117 SUPPORTED_10000baseT_Full |
7118 SUPPORTED_TP |
7119 SUPPORTED_FIBRE |
7120 SUPPORTED_Autoneg |
7121 SUPPORTED_Pause |
7122 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7123 break;
7124
7125 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7126 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7127 ext_phy_type);
f1410647 7128
34f80b04
EG
7129 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7130 SUPPORTED_FIBRE |
7131 SUPPORTED_Pause |
7132 SUPPORTED_Asym_Pause);
f1410647
ET
7133 break;
7134
a2fbb9ea 7135 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7136 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7137 ext_phy_type);
7138
34f80b04
EG
7139 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7140 SUPPORTED_1000baseT_Full |
7141 SUPPORTED_FIBRE |
7142 SUPPORTED_Pause |
7143 SUPPORTED_Asym_Pause);
f1410647
ET
7144 break;
7145
7146 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7147 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7148 ext_phy_type);
7149
34f80b04
EG
7150 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7151 SUPPORTED_1000baseT_Full |
7152 SUPPORTED_FIBRE |
7153 SUPPORTED_Autoneg |
7154 SUPPORTED_Pause |
7155 SUPPORTED_Asym_Pause);
f1410647
ET
7156 break;
7157
c18487ee
YR
7158 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7159 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7160 ext_phy_type);
7161
34f80b04
EG
7162 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7163 SUPPORTED_2500baseX_Full |
7164 SUPPORTED_1000baseT_Full |
7165 SUPPORTED_FIBRE |
7166 SUPPORTED_Autoneg |
7167 SUPPORTED_Pause |
7168 SUPPORTED_Asym_Pause);
c18487ee
YR
7169 break;
7170
f1410647
ET
7171 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7172 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7173 ext_phy_type);
7174
34f80b04
EG
7175 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7176 SUPPORTED_TP |
7177 SUPPORTED_Autoneg |
7178 SUPPORTED_Pause |
7179 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7180 break;
7181
c18487ee
YR
7182 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7183 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7184 bp->link_params.ext_phy_config);
7185 break;
7186
a2fbb9ea
ET
7187 default:
7188 BNX2X_ERR("NVRAM config error. "
7189 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7190 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7191 return;
7192 }
7193
34f80b04
EG
7194 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7195 port*0x18);
7196 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7197
a2fbb9ea
ET
7198 break;
7199
7200 default:
7201 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7202 bp->port.link_config);
a2fbb9ea
ET
7203 return;
7204 }
34f80b04 7205 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7206
7207 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7208 if (!(bp->link_params.speed_cap_mask &
7209 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7210 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7211
c18487ee
YR
7212 if (!(bp->link_params.speed_cap_mask &
7213 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7214 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7215
c18487ee
YR
7216 if (!(bp->link_params.speed_cap_mask &
7217 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7218 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7219
c18487ee
YR
7220 if (!(bp->link_params.speed_cap_mask &
7221 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7222 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7223
c18487ee
YR
7224 if (!(bp->link_params.speed_cap_mask &
7225 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7226 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7227 SUPPORTED_1000baseT_Full);
a2fbb9ea 7228
c18487ee
YR
7229 if (!(bp->link_params.speed_cap_mask &
7230 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7231 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7232
c18487ee
YR
7233 if (!(bp->link_params.speed_cap_mask &
7234 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7235 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7236
34f80b04 7237 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7238}
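/* Illustrative example (not part of the driver): if the NVRAM
 * speed_cap_mask advertises only the 1G and 10G capabilities, the
 * masking above clears the 10M/100M (half and full) and 2.5G bits,
 * leaving e.g. Autoneg | TP | FIBRE | Pause | 1000baseT_Full |
 * 10000baseT_Full in bp->port.supported.
 */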
7239
34f80b04 7240static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7241{
c18487ee 7242 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7243
34f80b04 7244 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7245 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7246 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7247 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7248 bp->port.advertising = bp->port.supported;
a2fbb9ea 7249 } else {
c18487ee
YR
7250 u32 ext_phy_type =
7251 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7252
7253 if ((ext_phy_type ==
7254 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7255 (ext_phy_type ==
7256 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7257 /* force 10G, no AN */
c18487ee 7258 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7259 bp->port.advertising =
a2fbb9ea
ET
7260 (ADVERTISED_10000baseT_Full |
7261 ADVERTISED_FIBRE);
7262 break;
7263 }
7264 BNX2X_ERR("NVRAM config error. "
7265 "Invalid link_config 0x%x"
7266 " Autoneg not supported\n",
34f80b04 7267 bp->port.link_config);
a2fbb9ea
ET
7268 return;
7269 }
7270 break;
7271
7272 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7273 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7274 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7275 bp->port.advertising = (ADVERTISED_10baseT_Full |
7276 ADVERTISED_TP);
a2fbb9ea
ET
7277 } else {
7278 BNX2X_ERR("NVRAM config error. "
7279 "Invalid link_config 0x%x"
7280 " speed_cap_mask 0x%x\n",
34f80b04 7281 bp->port.link_config,
c18487ee 7282 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7283 return;
7284 }
7285 break;
7286
7287 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7288 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7289 bp->link_params.req_line_speed = SPEED_10;
7290 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7291 bp->port.advertising = (ADVERTISED_10baseT_Half |
7292 ADVERTISED_TP);
a2fbb9ea
ET
7293 } else {
7294 BNX2X_ERR("NVRAM config error. "
7295 "Invalid link_config 0x%x"
7296 " speed_cap_mask 0x%x\n",
34f80b04 7297 bp->port.link_config,
c18487ee 7298 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7299 return;
7300 }
7301 break;
7302
7303 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7304 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7305 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7306 bp->port.advertising = (ADVERTISED_100baseT_Full |
7307 ADVERTISED_TP);
a2fbb9ea
ET
7308 } else {
7309 BNX2X_ERR("NVRAM config error. "
7310 "Invalid link_config 0x%x"
7311 " speed_cap_mask 0x%x\n",
34f80b04 7312 bp->port.link_config,
c18487ee 7313 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7314 return;
7315 }
7316 break;
7317
7318 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7319 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7320 bp->link_params.req_line_speed = SPEED_100;
7321 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7322 bp->port.advertising = (ADVERTISED_100baseT_Half |
7323 ADVERTISED_TP);
a2fbb9ea
ET
7324 } else {
7325 BNX2X_ERR("NVRAM config error. "
7326 "Invalid link_config 0x%x"
7327 " speed_cap_mask 0x%x\n",
34f80b04 7328 bp->port.link_config,
c18487ee 7329 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7330 return;
7331 }
7332 break;
7333
7334 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7335 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7336 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7337 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7338 ADVERTISED_TP);
a2fbb9ea
ET
7339 } else {
7340 BNX2X_ERR("NVRAM config error. "
7341 "Invalid link_config 0x%x"
7342 " speed_cap_mask 0x%x\n",
34f80b04 7343 bp->port.link_config,
c18487ee 7344 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7345 return;
7346 }
7347 break;
7348
7349 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7350 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7351 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7352 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7353 ADVERTISED_TP);
a2fbb9ea
ET
7354 } else {
7355 BNX2X_ERR("NVRAM config error. "
7356 "Invalid link_config 0x%x"
7357 " speed_cap_mask 0x%x\n",
34f80b04 7358 bp->port.link_config,
c18487ee 7359 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7360 return;
7361 }
7362 break;
7363
7364 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7365 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7366 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7367 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7368 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7369 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7370 ADVERTISED_FIBRE);
a2fbb9ea
ET
7371 } else {
7372 BNX2X_ERR("NVRAM config error. "
7373 "Invalid link_config 0x%x"
7374 " speed_cap_mask 0x%x\n",
34f80b04 7375 bp->port.link_config,
c18487ee 7376 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7377 return;
7378 }
7379 break;
7380
7381 default:
7382 BNX2X_ERR("NVRAM config error. "
7383 "BAD link speed link_config 0x%x\n",
34f80b04 7384 bp->port.link_config);
c18487ee 7385 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7386 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7387 break;
7388 }
a2fbb9ea 7389
34f80b04
EG
7390 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7391 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7392 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7393 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7394 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 7395
c18487ee 7396 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7397 " advertising 0x%x\n",
c18487ee
YR
7398 bp->link_params.req_line_speed,
7399 bp->link_params.req_duplex,
34f80b04 7400 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7401}
7402
34f80b04 7403static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7404{
34f80b04
EG
7405 int port = BP_PORT(bp);
7406 u32 val, val2;
a2fbb9ea 7407
c18487ee 7408 bp->link_params.bp = bp;
34f80b04 7409 bp->link_params.port = port;
c18487ee 7410
c18487ee 7411 bp->link_params.serdes_config =
f1410647 7412 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7413 bp->link_params.lane_config =
a2fbb9ea 7414 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7415 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7416 SHMEM_RD(bp,
7417 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7418 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7419 SHMEM_RD(bp,
7420 dev_info.port_hw_config[port].speed_capability_mask);
7421
34f80b04 7422 bp->port.link_config =
a2fbb9ea
ET
7423 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7424
34f80b04
EG
7425 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7426 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7427 " link_config 0x%08x\n",
c18487ee
YR
7428 bp->link_params.serdes_config,
7429 bp->link_params.lane_config,
7430 bp->link_params.ext_phy_config,
34f80b04 7431 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7432
34f80b04 7433 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7434 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7435 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7436
7437 bnx2x_link_settings_requested(bp);
7438
7439 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7440 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7441 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7442 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7443 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7444 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7445 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7446 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
7447 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7448 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
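	/* Worked example (illustrative): mac_upper = 0x00000010 and
	 * mac_lower = 0x18aabbcc unpack via the shifts above to the
	 * address 00:10:18:aa:bb:cc (upper word holds bytes 0-1, the
	 * lower word bytes 2-5).
	 */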
34f80b04
EG
7449}
7450
7451static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7452{
7453 int func = BP_FUNC(bp);
7454 u32 val, val2;
7455 int rc = 0;
a2fbb9ea 7456
34f80b04 7457 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7458
34f80b04
EG
7459 bp->e1hov = 0;
7460 bp->e1hmf = 0;
7461 if (CHIP_IS_E1H(bp)) {
7462 bp->mf_config =
7463 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7464
3196a88a
EG
7465 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7466 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 7467 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7468
34f80b04
EG
7469 bp->e1hov = val;
7470 bp->e1hmf = 1;
7471 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7472 "(0x%04x)\n",
7473 func, bp->e1hov, bp->e1hov);
7474 } else {
7475 BNX2X_DEV_INFO("Single function mode\n");
7476 if (BP_E1HVN(bp)) {
7477 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7478 " aborting\n", func);
7479 rc = -EPERM;
7480 }
7481 }
7482 }
a2fbb9ea 7483
34f80b04
EG
7484 if (!BP_NOMCP(bp)) {
7485 bnx2x_get_port_hwinfo(bp);
7486
7487 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7488 DRV_MSG_SEQ_NUMBER_MASK);
7489 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7490 }
7491
7492 if (IS_E1HMF(bp)) {
7493 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7494 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7495 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7496 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7497 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7498 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7499 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7500 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7501 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7502 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7503 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7504 ETH_ALEN);
7505 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7506 ETH_ALEN);
a2fbb9ea 7507 }
34f80b04
EG
7508
7509 return rc;
a2fbb9ea
ET
7510 }
7511
34f80b04
EG
7512 if (BP_NOMCP(bp)) {
7513 /* only supposed to happen on emulation/FPGA */
33471629 7514		BNX2X_ERR("warning: random MAC workaround active\n");
34f80b04
EG
7515 random_ether_addr(bp->dev->dev_addr);
7516 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7517 }
a2fbb9ea 7518
34f80b04
EG
7519 return rc;
7520}
7521
7522static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7523{
7524 int func = BP_FUNC(bp);
7525 int rc;
7526
da5a662a
VZ
7527 /* Disable interrupt handling until HW is initialized */
7528 atomic_set(&bp->intr_sem, 1);
7529
34f80b04 7530 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7531
1cf167f2 7532 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
7533 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7534
7535 rc = bnx2x_get_hwinfo(bp);
7536
7537 /* need to reset chip if undi was active */
7538 if (!BP_NOMCP(bp))
7539 bnx2x_undi_unload(bp);
7540
7541 if (CHIP_REV_IS_FPGA(bp))
7542 printk(KERN_ERR PFX "FPGA detected\n");
7543
7544 if (BP_NOMCP(bp) && (func == 0))
7545 printk(KERN_ERR PFX
7546 "MCP disabled, must load devices in order!\n");
7547
7a9b2557
VZ
7548 /* Set TPA flags */
7549 if (disable_tpa) {
7550 bp->flags &= ~TPA_ENABLE_FLAG;
7551 bp->dev->features &= ~NETIF_F_LRO;
7552 } else {
7553 bp->flags |= TPA_ENABLE_FLAG;
7554 bp->dev->features |= NETIF_F_LRO;
7555 }
7556
7557
34f80b04
EG
7558 bp->tx_ring_size = MAX_TX_AVAIL;
7559 bp->rx_ring_size = MAX_RX_AVAIL;
7560
7561 bp->rx_csum = 1;
7562 bp->rx_offset = 0;
7563
7564 bp->tx_ticks = 50;
7565 bp->rx_ticks = 25;
7566
34f80b04
EG
7567 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7568 bp->current_interval = (poll ? poll : bp->timer_interval);
7569
7570 init_timer(&bp->timer);
7571 bp->timer.expires = jiffies + bp->current_interval;
7572 bp->timer.data = (unsigned long) bp;
7573 bp->timer.function = bnx2x_timer;
7574
7575 return rc;
a2fbb9ea
ET
7576}
7577
7578/*
7579 * ethtool service functions
7580 */
7581
7582/* All ethtool functions called with rtnl_lock */
7583
7584static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7585{
7586 struct bnx2x *bp = netdev_priv(dev);
7587
34f80b04
EG
7588 cmd->supported = bp->port.supported;
7589 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7590
7591 if (netif_carrier_ok(dev)) {
c18487ee
YR
7592 cmd->speed = bp->link_vars.line_speed;
7593 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7594 } else {
c18487ee
YR
7595 cmd->speed = bp->link_params.req_line_speed;
7596 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7597 }
34f80b04
EG
7598 if (IS_E1HMF(bp)) {
7599 u16 vn_max_rate;
7600
7601 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7602 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7603 if (vn_max_rate < cmd->speed)
7604 cmd->speed = vn_max_rate;
7605 }
a2fbb9ea 7606
c18487ee
YR
7607 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7608 u32 ext_phy_type =
7609 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
7610
7611 switch (ext_phy_type) {
7612 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7613 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7614 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7615 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7616 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
7617 cmd->port = PORT_FIBRE;
7618 break;
7619
7620 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7621 cmd->port = PORT_TP;
7622 break;
7623
c18487ee
YR
7624 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7625 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7626 bp->link_params.ext_phy_config);
7627 break;
7628
f1410647
ET
7629 default:
7630 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
7631 bp->link_params.ext_phy_config);
7632 break;
f1410647
ET
7633 }
7634 } else
a2fbb9ea 7635 cmd->port = PORT_TP;
a2fbb9ea 7636
34f80b04 7637 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
7638 cmd->transceiver = XCVR_INTERNAL;
7639
c18487ee 7640 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7641 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7642 else
a2fbb9ea 7643 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7644
7645 cmd->maxtxpkt = 0;
7646 cmd->maxrxpkt = 0;
7647
7648 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7649 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7650 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7651 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7652 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7653 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7654 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7655
7656 return 0;
7657}
7658
7659static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7660{
7661 struct bnx2x *bp = netdev_priv(dev);
7662 u32 advertising;
7663
34f80b04
EG
7664 if (IS_E1HMF(bp))
7665 return 0;
7666
a2fbb9ea
ET
7667 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7668 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7669 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7670 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7671 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7672 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7673 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7674
a2fbb9ea 7675 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
7676 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7677 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7678 return -EINVAL;
f1410647 7679 }
a2fbb9ea
ET
7680
7681 /* advertise the requested speed and duplex if supported */
34f80b04 7682 cmd->advertising &= bp->port.supported;
a2fbb9ea 7683
c18487ee
YR
7684 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7685 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
7686 bp->port.advertising |= (ADVERTISED_Autoneg |
7687 cmd->advertising);
a2fbb9ea
ET
7688
7689 } else { /* forced speed */
7690 /* advertise the requested speed and duplex if supported */
7691 switch (cmd->speed) {
7692 case SPEED_10:
7693 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7694 if (!(bp->port.supported &
f1410647
ET
7695 SUPPORTED_10baseT_Full)) {
7696 DP(NETIF_MSG_LINK,
7697 "10M full not supported\n");
a2fbb9ea 7698 return -EINVAL;
f1410647 7699 }
a2fbb9ea
ET
7700
7701 advertising = (ADVERTISED_10baseT_Full |
7702 ADVERTISED_TP);
7703 } else {
34f80b04 7704 if (!(bp->port.supported &
f1410647
ET
7705 SUPPORTED_10baseT_Half)) {
7706 DP(NETIF_MSG_LINK,
7707 "10M half not supported\n");
a2fbb9ea 7708 return -EINVAL;
f1410647 7709 }
a2fbb9ea
ET
7710
7711 advertising = (ADVERTISED_10baseT_Half |
7712 ADVERTISED_TP);
7713 }
7714 break;
7715
7716 case SPEED_100:
7717 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7718 if (!(bp->port.supported &
f1410647
ET
7719 SUPPORTED_100baseT_Full)) {
7720 DP(NETIF_MSG_LINK,
7721 "100M full not supported\n");
a2fbb9ea 7722 return -EINVAL;
f1410647 7723 }
a2fbb9ea
ET
7724
7725 advertising = (ADVERTISED_100baseT_Full |
7726 ADVERTISED_TP);
7727 } else {
34f80b04 7728 if (!(bp->port.supported &
f1410647
ET
7729 SUPPORTED_100baseT_Half)) {
7730 DP(NETIF_MSG_LINK,
7731 "100M half not supported\n");
a2fbb9ea 7732 return -EINVAL;
f1410647 7733 }
a2fbb9ea
ET
7734
7735 advertising = (ADVERTISED_100baseT_Half |
7736 ADVERTISED_TP);
7737 }
7738 break;
7739
7740 case SPEED_1000:
f1410647
ET
7741 if (cmd->duplex != DUPLEX_FULL) {
7742 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7743 return -EINVAL;
f1410647 7744 }
a2fbb9ea 7745
34f80b04 7746 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7747 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7748 return -EINVAL;
f1410647 7749 }
a2fbb9ea
ET
7750
7751 advertising = (ADVERTISED_1000baseT_Full |
7752 ADVERTISED_TP);
7753 break;
7754
7755 case SPEED_2500:
f1410647
ET
7756 if (cmd->duplex != DUPLEX_FULL) {
7757 DP(NETIF_MSG_LINK,
7758 "2.5G half not supported\n");
a2fbb9ea 7759 return -EINVAL;
f1410647 7760 }
a2fbb9ea 7761
34f80b04 7762 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
7763 DP(NETIF_MSG_LINK,
7764 "2.5G full not supported\n");
a2fbb9ea 7765 return -EINVAL;
f1410647 7766 }
a2fbb9ea 7767
f1410647 7768 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7769 ADVERTISED_TP);
7770 break;
7771
7772 case SPEED_10000:
f1410647
ET
7773 if (cmd->duplex != DUPLEX_FULL) {
7774 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7775 return -EINVAL;
f1410647 7776 }
a2fbb9ea 7777
34f80b04 7778 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7779 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7780 return -EINVAL;
f1410647 7781 }
a2fbb9ea
ET
7782
7783 advertising = (ADVERTISED_10000baseT_Full |
7784 ADVERTISED_FIBRE);
7785 break;
7786
7787 default:
f1410647 7788 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
7789 return -EINVAL;
7790 }
7791
c18487ee
YR
7792 bp->link_params.req_line_speed = cmd->speed;
7793 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7794 bp->port.advertising = advertising;
a2fbb9ea
ET
7795 }
7796
c18487ee 7797 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7798 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7799 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7800 bp->port.advertising);
a2fbb9ea 7801
34f80b04 7802 if (netif_running(dev)) {
bb2a0f7a 7803 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7804 bnx2x_link_set(bp);
7805 }
a2fbb9ea
ET
7806
7807 return 0;
7808}
7809
c18487ee
YR
7810#define PHY_FW_VER_LEN 10
7811
a2fbb9ea
ET
7812static void bnx2x_get_drvinfo(struct net_device *dev,
7813 struct ethtool_drvinfo *info)
7814{
7815 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 7816 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
7817
7818 strcpy(info->driver, DRV_MODULE_NAME);
7819 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
7820
7821 phy_fw_ver[0] = '\0';
34f80b04 7822 if (bp->port.pmf) {
4a37fb66 7823 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
7824 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7825 (bp->state != BNX2X_STATE_CLOSED),
7826 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 7827 bnx2x_release_phy_lock(bp);
34f80b04 7828 }
c18487ee 7829
f0e53a84
EG
7830 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7831 (bp->common.bc_ver & 0xff0000) >> 16,
7832 (bp->common.bc_ver & 0xff00) >> 8,
7833 (bp->common.bc_ver & 0xff),
7834 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
7835 strcpy(info->bus_info, pci_name(bp->pdev));
7836 info->n_stats = BNX2X_NUM_STATS;
7837 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7838 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
7839 info->regdump_len = 0;
7840}
7841
7842static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7843{
7844 struct bnx2x *bp = netdev_priv(dev);
7845
7846 if (bp->flags & NO_WOL_FLAG) {
7847 wol->supported = 0;
7848 wol->wolopts = 0;
7849 } else {
7850 wol->supported = WAKE_MAGIC;
7851 if (bp->wol)
7852 wol->wolopts = WAKE_MAGIC;
7853 else
7854 wol->wolopts = 0;
7855 }
7856 memset(&wol->sopass, 0, sizeof(wol->sopass));
7857}
7858
7859static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7860{
7861 struct bnx2x *bp = netdev_priv(dev);
7862
7863 if (wol->wolopts & ~WAKE_MAGIC)
7864 return -EINVAL;
7865
7866 if (wol->wolopts & WAKE_MAGIC) {
7867 if (bp->flags & NO_WOL_FLAG)
7868 return -EINVAL;
7869
7870 bp->wol = 1;
34f80b04 7871 } else
a2fbb9ea 7872 bp->wol = 0;
34f80b04 7873
a2fbb9ea
ET
7874 return 0;
7875}
7876
7877static u32 bnx2x_get_msglevel(struct net_device *dev)
7878{
7879 struct bnx2x *bp = netdev_priv(dev);
7880
7881 return bp->msglevel;
7882}
7883
7884static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7885{
7886 struct bnx2x *bp = netdev_priv(dev);
7887
7888 if (capable(CAP_NET_ADMIN))
7889 bp->msglevel = level;
7890}
7891
7892static int bnx2x_nway_reset(struct net_device *dev)
7893{
7894 struct bnx2x *bp = netdev_priv(dev);
7895
34f80b04
EG
7896 if (!bp->port.pmf)
7897 return 0;
a2fbb9ea 7898
34f80b04 7899 if (netif_running(dev)) {
bb2a0f7a 7900 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7901 bnx2x_link_set(bp);
7902 }
a2fbb9ea
ET
7903
7904 return 0;
7905}
7906
7907static int bnx2x_get_eeprom_len(struct net_device *dev)
7908{
7909 struct bnx2x *bp = netdev_priv(dev);
7910
34f80b04 7911 return bp->common.flash_size;
a2fbb9ea
ET
7912}
7913
7914static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7915{
34f80b04 7916 int port = BP_PORT(bp);
a2fbb9ea
ET
7917 int count, i;
7918 u32 val = 0;
7919
7920 /* adjust timeout for emulation/FPGA */
7921 count = NVRAM_TIMEOUT_COUNT;
7922 if (CHIP_REV_IS_SLOW(bp))
7923 count *= 100;
7924
7925 /* request access to nvram interface */
7926 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7927 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7928
7929 for (i = 0; i < count*10; i++) {
7930 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7931 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7932 break;
7933
7934 udelay(5);
7935 }
7936
7937 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7938 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
7939 return -EBUSY;
7940 }
7941
7942 return 0;
7943}
7944
7945static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7946{
34f80b04 7947 int port = BP_PORT(bp);
a2fbb9ea
ET
7948 int count, i;
7949 u32 val = 0;
7950
7951 /* adjust timeout for emulation/FPGA */
7952 count = NVRAM_TIMEOUT_COUNT;
7953 if (CHIP_REV_IS_SLOW(bp))
7954 count *= 100;
7955
7956 /* relinquish nvram interface */
7957 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7958 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7959
7960 for (i = 0; i < count*10; i++) {
7961 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7962 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7963 break;
7964
7965 udelay(5);
7966 }
7967
7968 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 7969 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
7970 return -EBUSY;
7971 }
7972
7973 return 0;
7974}
7975
7976static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7977{
7978 u32 val;
7979
7980 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7981
7982 /* enable both bits, even on read */
7983 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7984 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7985 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7986}
7987
7988static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7989{
7990 u32 val;
7991
7992 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7993
7994 /* disable both bits, even after read */
7995 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7996 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7997 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7998}
7999
8000static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8001 u32 cmd_flags)
8002{
f1410647 8003 int count, i, rc;
a2fbb9ea
ET
8004 u32 val;
8005
8006 /* build the command word */
8007 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8008
8009 /* need to clear DONE bit separately */
8010 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8011
8012 /* address of the NVRAM to read from */
8013 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8014 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8015
8016 /* issue a read command */
8017 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8018
8019 /* adjust timeout for emulation/FPGA */
8020 count = NVRAM_TIMEOUT_COUNT;
8021 if (CHIP_REV_IS_SLOW(bp))
8022 count *= 100;
8023
8024 /* wait for completion */
8025 *ret_val = 0;
8026 rc = -EBUSY;
8027 for (i = 0; i < count; i++) {
8028 udelay(5);
8029 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8030
8031 if (val & MCPR_NVM_COMMAND_DONE) {
8032 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
 8033			/* we read nvram data in cpu order,
 8034			 * but ethtool sees it as an array of bytes;
 8035			 * converting to big-endian does the work */
8036 val = cpu_to_be32(val);
8037 *ret_val = val;
8038 rc = 0;
8039 break;
8040 }
8041 }
8042
8043 return rc;
8044}
8045
8046static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8047 int buf_size)
8048{
8049 int rc;
8050 u32 cmd_flags;
8051 u32 val;
8052
8053 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8054 DP(BNX2X_MSG_NVM,
c14423fe 8055 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8056 offset, buf_size);
8057 return -EINVAL;
8058 }
8059
34f80b04
EG
8060 if (offset + buf_size > bp->common.flash_size) {
8061 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8062 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8063 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8064 return -EINVAL;
8065 }
8066
8067 /* request access to nvram interface */
8068 rc = bnx2x_acquire_nvram_lock(bp);
8069 if (rc)
8070 return rc;
8071
8072 /* enable access to nvram interface */
8073 bnx2x_enable_nvram_access(bp);
8074
8075 /* read the first word(s) */
8076 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8077 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8078 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8079 memcpy(ret_buf, &val, 4);
8080
8081 /* advance to the next dword */
8082 offset += sizeof(u32);
8083 ret_buf += sizeof(u32);
8084 buf_size -= sizeof(u32);
8085 cmd_flags = 0;
8086 }
8087
8088 if (rc == 0) {
8089 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8090 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8091 memcpy(ret_buf, &val, 4);
8092 }
8093
8094 /* disable access to nvram interface */
8095 bnx2x_disable_nvram_access(bp);
8096 bnx2x_release_nvram_lock(bp);
8097
8098 return rc;
8099}
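/* Illustrative usage sketch (hypothetical caller, not part of the
 * driver): dumping the first dwords of flash with bnx2x_nvram_read().
 * Offset and length must be dword-aligned and the range must fit in
 * bp->common.flash_size, per the checks above.
 */
static int bnx2x_dump_nvram_start(struct bnx2x *bp)
{
	u8 buf[16];
	int rc = bnx2x_nvram_read(bp, 0, buf, sizeof(buf));

	if (rc == 0)
		print_hex_dump(KERN_DEBUG, "nvram: ", DUMP_PREFIX_OFFSET,
			       16, 1, buf, sizeof(buf), false);
	return rc;
}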
8100
8101static int bnx2x_get_eeprom(struct net_device *dev,
8102 struct ethtool_eeprom *eeprom, u8 *eebuf)
8103{
8104 struct bnx2x *bp = netdev_priv(dev);
8105 int rc;
8106
34f80b04 8107 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8108 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8109 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8110 eeprom->len, eeprom->len);
8111
8112 /* parameters already validated in ethtool_get_eeprom */
8113
8114 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8115
8116 return rc;
8117}
8118
8119static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8120 u32 cmd_flags)
8121{
f1410647 8122 int count, i, rc;
a2fbb9ea
ET
8123
8124 /* build the command word */
8125 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8126
8127 /* need to clear DONE bit separately */
8128 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8129
8130 /* write the data */
8131 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8132
8133 /* address of the NVRAM to write to */
8134 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8135 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8136
8137 /* issue the write command */
8138 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8139
8140 /* adjust timeout for emulation/FPGA */
8141 count = NVRAM_TIMEOUT_COUNT;
8142 if (CHIP_REV_IS_SLOW(bp))
8143 count *= 100;
8144
8145 /* wait for completion */
8146 rc = -EBUSY;
8147 for (i = 0; i < count; i++) {
8148 udelay(5);
8149 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8150 if (val & MCPR_NVM_COMMAND_DONE) {
8151 rc = 0;
8152 break;
8153 }
8154 }
8155
8156 return rc;
8157}
8158
f1410647 8159#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8160
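/* Worked example (illustrative): BYTE_OFFSET() gives the bit offset
 * of a byte within its aligned dword, e.g. offset 0x102 -> byte 2 ->
 * bit 16, so bnx2x_nvram_write1() below masks 0xff << 16 out of the
 * dword read at 0x100 and ORs the new byte in at that position.
 */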
8161static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8162 int buf_size)
8163{
8164 int rc;
8165 u32 cmd_flags;
8166 u32 align_offset;
8167 u32 val;
8168
34f80b04
EG
8169 if (offset + buf_size > bp->common.flash_size) {
8170 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8171 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8172 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8173 return -EINVAL;
8174 }
8175
8176 /* request access to nvram interface */
8177 rc = bnx2x_acquire_nvram_lock(bp);
8178 if (rc)
8179 return rc;
8180
8181 /* enable access to nvram interface */
8182 bnx2x_enable_nvram_access(bp);
8183
8184 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8185 align_offset = (offset & ~0x03);
8186 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8187
8188 if (rc == 0) {
8189 val &= ~(0xff << BYTE_OFFSET(offset));
8190 val |= (*data_buf << BYTE_OFFSET(offset));
8191
 8192		/* nvram data is returned as an array of bytes;
 8193		 * convert it back to cpu order */
8194 val = be32_to_cpu(val);
8195
a2fbb9ea
ET
8196 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8197 cmd_flags);
8198 }
8199
8200 /* disable access to nvram interface */
8201 bnx2x_disable_nvram_access(bp);
8202 bnx2x_release_nvram_lock(bp);
8203
8204 return rc;
8205}
8206
8207static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8208 int buf_size)
8209{
8210 int rc;
8211 u32 cmd_flags;
8212 u32 val;
8213 u32 written_so_far;
8214
34f80b04 8215 if (buf_size == 1) /* ethtool */
a2fbb9ea 8216 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8217
8218 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8219 DP(BNX2X_MSG_NVM,
c14423fe 8220 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8221 offset, buf_size);
8222 return -EINVAL;
8223 }
8224
34f80b04
EG
8225 if (offset + buf_size > bp->common.flash_size) {
8226 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8227 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8228 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8229 return -EINVAL;
8230 }
8231
8232 /* request access to nvram interface */
8233 rc = bnx2x_acquire_nvram_lock(bp);
8234 if (rc)
8235 return rc;
8236
8237 /* enable access to nvram interface */
8238 bnx2x_enable_nvram_access(bp);
8239
8240 written_so_far = 0;
8241 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8242 while ((written_so_far < buf_size) && (rc == 0)) {
8243 if (written_so_far == (buf_size - sizeof(u32)))
8244 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8245 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8246 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8247 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8248 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8249
8250 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8251
8252 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8253
8254 /* advance to the next dword */
8255 offset += sizeof(u32);
8256 data_buf += sizeof(u32);
8257 written_so_far += sizeof(u32);
8258 cmd_flags = 0;
8259 }
8260
8261 /* disable access to nvram interface */
8262 bnx2x_disable_nvram_access(bp);
8263 bnx2x_release_nvram_lock(bp);
8264
8265 return rc;
8266}
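/* Illustrative example (not part of the driver; assumes
 * NVRAM_PAGE_SIZE is 256): a 16-byte write at offset 0xfc crosses a
 * page boundary, so the loop above tags the dword at 0xfc with
 * MCPR_NVM_COMMAND_LAST and restarts the dword at 0x100 with
 * MCPR_NVM_COMMAND_FIRST.
 */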
8267
8268static int bnx2x_set_eeprom(struct net_device *dev,
8269 struct ethtool_eeprom *eeprom, u8 *eebuf)
8270{
8271 struct bnx2x *bp = netdev_priv(dev);
8272 int rc;
8273
9f4c9583
EG
8274 if (!netif_running(dev))
8275 return -EAGAIN;
8276
34f80b04 8277 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8278 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8279 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8280 eeprom->len, eeprom->len);
8281
8282 /* parameters already validated in ethtool_set_eeprom */
8283
c18487ee 8284 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
8285 if (eeprom->magic == 0x00504859)
8286 if (bp->port.pmf) {
8287
4a37fb66 8288 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8289 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8290 bp->link_params.ext_phy_config,
8291 (bp->state != BNX2X_STATE_CLOSED),
8292 eebuf, eeprom->len);
bb2a0f7a
YG
8293 if ((bp->state == BNX2X_STATE_OPEN) ||
8294 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04
EG
8295 rc |= bnx2x_link_reset(&bp->link_params,
8296 &bp->link_vars);
8297 rc |= bnx2x_phy_init(&bp->link_params,
8298 &bp->link_vars);
bb2a0f7a 8299 }
4a37fb66 8300 bnx2x_release_phy_lock(bp);
34f80b04
EG
8301
8302 } else /* Only the PMF can access the PHY */
8303 return -EINVAL;
8304 else
c18487ee 8305 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8306
8307 return rc;
8308}
8309
8310static int bnx2x_get_coalesce(struct net_device *dev,
8311 struct ethtool_coalesce *coal)
8312{
8313 struct bnx2x *bp = netdev_priv(dev);
8314
8315 memset(coal, 0, sizeof(struct ethtool_coalesce));
8316
8317 coal->rx_coalesce_usecs = bp->rx_ticks;
8318 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8319
8320 return 0;
8321}
8322
8323static int bnx2x_set_coalesce(struct net_device *dev,
8324 struct ethtool_coalesce *coal)
8325{
8326 struct bnx2x *bp = netdev_priv(dev);
8327
8328 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8329 if (bp->rx_ticks > 3000)
8330 bp->rx_ticks = 3000;
8331
8332 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8333 if (bp->tx_ticks > 0x3000)
8334 bp->tx_ticks = 0x3000;
8335
34f80b04 8336 if (netif_running(dev))
a2fbb9ea
ET
8337 bnx2x_update_coalesce(bp);
8338
8339 return 0;
8340}
8341
8342static void bnx2x_get_ringparam(struct net_device *dev,
8343 struct ethtool_ringparam *ering)
8344{
8345 struct bnx2x *bp = netdev_priv(dev);
8346
8347 ering->rx_max_pending = MAX_RX_AVAIL;
8348 ering->rx_mini_max_pending = 0;
8349 ering->rx_jumbo_max_pending = 0;
8350
8351 ering->rx_pending = bp->rx_ring_size;
8352 ering->rx_mini_pending = 0;
8353 ering->rx_jumbo_pending = 0;
8354
8355 ering->tx_max_pending = MAX_TX_AVAIL;
8356 ering->tx_pending = bp->tx_ring_size;
8357}
8358
8359static int bnx2x_set_ringparam(struct net_device *dev,
8360 struct ethtool_ringparam *ering)
8361{
8362 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8363 int rc = 0;
a2fbb9ea
ET
8364
8365 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8366 (ering->tx_pending > MAX_TX_AVAIL) ||
8367 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8368 return -EINVAL;
8369
8370 bp->rx_ring_size = ering->rx_pending;
8371 bp->tx_ring_size = ering->tx_pending;
8372
34f80b04
EG
8373 if (netif_running(dev)) {
8374 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8375 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8376 }
8377
34f80b04 8378 return rc;
a2fbb9ea
ET
8379}
8380
8381static void bnx2x_get_pauseparam(struct net_device *dev,
8382 struct ethtool_pauseparam *epause)
8383{
8384 struct bnx2x *bp = netdev_priv(dev);
8385
c0700f90 8386 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
8387 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8388
c0700f90
DM
8389 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8390 BNX2X_FLOW_CTRL_RX);
8391 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8392 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
8393
8394 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8395 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8396 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8397}
8398
8399static int bnx2x_set_pauseparam(struct net_device *dev,
8400 struct ethtool_pauseparam *epause)
8401{
8402 struct bnx2x *bp = netdev_priv(dev);
8403
34f80b04
EG
8404 if (IS_E1HMF(bp))
8405 return 0;
8406
a2fbb9ea
ET
8407 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8408 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8409 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8410
c0700f90 8411 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 8412
f1410647 8413 if (epause->rx_pause)
c0700f90 8414 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 8415
f1410647 8416 if (epause->tx_pause)
c0700f90 8417 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 8418
c0700f90
DM
8419 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8420 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8421
c18487ee 8422 if (epause->autoneg) {
34f80b04 8423 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 8424 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
8425 return -EINVAL;
8426 }
a2fbb9ea 8427
c18487ee 8428 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 8429 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 8430 }
a2fbb9ea 8431
c18487ee
YR
8432 DP(NETIF_MSG_LINK,
8433 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8434
8435 if (netif_running(dev)) {
bb2a0f7a 8436 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8437 bnx2x_link_set(bp);
8438 }
a2fbb9ea
ET
8439
8440 return 0;
8441}
8442
df0f2343
VZ
8443static int bnx2x_set_flags(struct net_device *dev, u32 data)
8444{
8445 struct bnx2x *bp = netdev_priv(dev);
8446 int changed = 0;
8447 int rc = 0;
8448
8449 /* TPA requires Rx CSUM offloading */
8450 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8451 if (!(dev->features & NETIF_F_LRO)) {
8452 dev->features |= NETIF_F_LRO;
8453 bp->flags |= TPA_ENABLE_FLAG;
8454 changed = 1;
8455 }
8456
8457 } else if (dev->features & NETIF_F_LRO) {
8458 dev->features &= ~NETIF_F_LRO;
8459 bp->flags &= ~TPA_ENABLE_FLAG;
8460 changed = 1;
8461 }
8462
8463 if (changed && netif_running(dev)) {
8464 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8465 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8466 }
8467
8468 return rc;
8469}
8470
a2fbb9ea
ET
8471static u32 bnx2x_get_rx_csum(struct net_device *dev)
8472{
8473 struct bnx2x *bp = netdev_priv(dev);
8474
8475 return bp->rx_csum;
8476}
8477
8478static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8479{
8480 struct bnx2x *bp = netdev_priv(dev);
df0f2343 8481 int rc = 0;
a2fbb9ea
ET
8482
8483 bp->rx_csum = data;
df0f2343
VZ
8484
 8485	/* Disable TPA when Rx CSUM is disabled; otherwise all
 8486	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
8487 if (!data) {
8488 u32 flags = ethtool_op_get_flags(dev);
8489
8490 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8491 }
8492
8493 return rc;
a2fbb9ea
ET
8494}
8495
8496static int bnx2x_set_tso(struct net_device *dev, u32 data)
8497{
755735eb 8498 if (data) {
a2fbb9ea 8499 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8500 dev->features |= NETIF_F_TSO6;
8501 } else {
a2fbb9ea 8502 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8503 dev->features &= ~NETIF_F_TSO6;
8504 }
8505
a2fbb9ea
ET
8506 return 0;
8507}
8508
f3c87cdd 8509static const struct {
a2fbb9ea
ET
8510 char string[ETH_GSTRING_LEN];
8511} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
8512 { "register_test (offline)" },
8513 { "memory_test (offline)" },
8514 { "loopback_test (offline)" },
8515 { "nvram_test (online)" },
8516 { "interrupt_test (online)" },
8517 { "link_test (online)" },
8518 { "idle check (online)" },
8519 { "MC errors (online)" }
a2fbb9ea
ET
8520};
8521
8522static int bnx2x_self_test_count(struct net_device *dev)
8523{
8524 return BNX2X_NUM_TESTS;
8525}
8526
f3c87cdd
YG
8527static int bnx2x_test_registers(struct bnx2x *bp)
8528{
8529 int idx, i, rc = -ENODEV;
8530 u32 wr_val = 0;
9dabc424 8531 int port = BP_PORT(bp);
f3c87cdd
YG
8532 static const struct {
8533 u32 offset0;
8534 u32 offset1;
8535 u32 mask;
8536 } reg_tbl[] = {
8537/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8538 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8539 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8540 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8541 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8542 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8543 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8544 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8545 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8546 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8547/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8548 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8549 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8550 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8551 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8552 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8553 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8554 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8555 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8556 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8557/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8558 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8559 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8560 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8561 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8562 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8563 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8564 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8565 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8566 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8567/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8568 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8569 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8570 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8571 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8572 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8573 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8574 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8575
8576 { 0xffffffff, 0, 0x00000000 }
8577 };
8578
8579 if (!netif_running(bp->dev))
8580 return rc;
8581
8582 /* Repeat the test twice:
8583 First by writing 0x00000000, second by writing 0xffffffff */
8584 for (idx = 0; idx < 2; idx++) {
8585
8586 switch (idx) {
8587 case 0:
8588 wr_val = 0;
8589 break;
8590 case 1:
8591 wr_val = 0xffffffff;
8592 break;
8593 }
8594
8595 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8596 u32 offset, mask, save_val, val;
8597
8598 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8599 mask = reg_tbl[i].mask;
8600
8601 save_val = REG_RD(bp, offset);
8602
8603 REG_WR(bp, offset, wr_val);
8604 val = REG_RD(bp, offset);
8605
8606 /* Restore the original register's value */
8607 REG_WR(bp, offset, save_val);
8608
 8609 			/* verify that the read-back value is as expected */
8610 if ((val & mask) != (wr_val & mask))
8611 goto test_reg_exit;
8612 }
8613 }
8614
8615 rc = 0;
8616
8617test_reg_exit:
8618 return rc;
8619}
8620
8621static int bnx2x_test_memory(struct bnx2x *bp)
8622{
8623 int i, j, rc = -ENODEV;
8624 u32 val;
8625 static const struct {
8626 u32 offset;
8627 int size;
8628 } mem_tbl[] = {
8629 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8630 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8631 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8632 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8633 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8634 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8635 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8636
8637 { 0xffffffff, 0 }
8638 };
8639 static const struct {
8640 char *name;
8641 u32 offset;
8642 u32 e1_mask;
8643 u32 e1h_mask;
f3c87cdd 8644 } prty_tbl[] = {
8645 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8646 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8647 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8648 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8649 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8650 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8651
8652 { NULL, 0xffffffff, 0, 0 }
8653 };
8654
8655 if (!netif_running(bp->dev))
8656 return rc;
8657
8658 /* Go through all the memories */
8659 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8660 for (j = 0; j < mem_tbl[i].size; j++)
8661 REG_RD(bp, mem_tbl[i].offset + j*4);
8662
8663 /* Check the parity status */
8664 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8665 val = REG_RD(bp, prty_tbl[i].offset);
8666 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8667 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8668 DP(NETIF_MSG_HW,
8669 "%s is 0x%x\n", prty_tbl[i].name, val);
8670 goto test_mem_exit;
8671 }
8672 }
8673
8674 rc = 0;
8675
8676test_mem_exit:
8677 return rc;
8678}
8679
8680static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8681{
8682 int cnt = 1000;
8683
8684 if (link_up)
8685 while (bnx2x_link_test(bp) && cnt--)
8686 msleep(10);
8687}
8688
8689static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8690{
8691 unsigned int pkt_size, num_pkts, i;
8692 struct sk_buff *skb;
8693 unsigned char *packet;
8694 struct bnx2x_fastpath *fp = &bp->fp[0];
8695 u16 tx_start_idx, tx_idx;
8696 u16 rx_start_idx, rx_idx;
8697 u16 pkt_prod;
8698 struct sw_tx_bd *tx_buf;
8699 struct eth_tx_bd *tx_bd;
8700 dma_addr_t mapping;
8701 union eth_rx_cqe *cqe;
8702 u8 cqe_fp_flags;
8703 struct sw_rx_bd *rx_buf;
8704 u16 len;
8705 int rc = -ENODEV;
8706
8707 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8708 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4a37fb66 8709 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8710 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8711 bnx2x_release_phy_lock(bp);
8712
8713 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8714 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
4a37fb66 8715 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8716 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8717 bnx2x_release_phy_lock(bp);
8718 /* wait until link state is restored */
8719 bnx2x_wait_for_link(bp, link_up);
8720
8721 } else
8722 return -EINVAL;
8723
8724 pkt_size = 1514;
8725 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8726 if (!skb) {
8727 rc = -ENOMEM;
8728 goto test_loopback_exit;
8729 }
8730 packet = skb_put(skb, pkt_size);
8731 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8732 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8733 for (i = ETH_HLEN; i < pkt_size; i++)
8734 packet[i] = (unsigned char) (i & 0xff);
8735
8736 num_pkts = 0;
8737 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8738 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8739
8740 pkt_prod = fp->tx_pkt_prod++;
8741 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8742 tx_buf->first_bd = fp->tx_bd_prod;
8743 tx_buf->skb = skb;
8744
8745 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8746 mapping = pci_map_single(bp->pdev, skb->data,
8747 skb_headlen(skb), PCI_DMA_TODEVICE);
8748 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8749 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8750 tx_bd->nbd = cpu_to_le16(1);
8751 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8752 tx_bd->vlan = cpu_to_le16(pkt_prod);
8753 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8754 ETH_TX_BD_FLAGS_END_BD);
8755 tx_bd->general_data = ((UNICAST_ADDRESS <<
8756 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8757
8758 wmb();
8759
8760 fp->hw_tx_prods->bds_prod =
8761 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8762 mb(); /* FW restriction: must not reorder writing nbd and packets */
8763 fp->hw_tx_prods->packets_prod =
8764 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8765 DOORBELL(bp, FP_IDX(fp), 0);
8766
8767 mmiowb();
8768
8769 num_pkts++;
8770 fp->tx_bd_prod++;
8771 bp->dev->trans_start = jiffies;
8772
8773 udelay(100);
8774
8775 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8776 if (tx_idx != tx_start_idx + num_pkts)
8777 goto test_loopback_exit;
8778
8779 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8780 if (rx_idx != rx_start_idx + num_pkts)
8781 goto test_loopback_exit;
8782
8783 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8784 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8785 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8786 goto test_loopback_rx_exit;
8787
8788 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8789 if (len != pkt_size)
8790 goto test_loopback_rx_exit;
8791
8792 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8793 skb = rx_buf->skb;
8794 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8795 for (i = ETH_HLEN; i < pkt_size; i++)
8796 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8797 goto test_loopback_rx_exit;
8798
8799 rc = 0;
8800
8801test_loopback_rx_exit:
8802
8803 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8804 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8805 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8806 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8807
8808 /* Update producers */
8809 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8810 fp->rx_sge_prod);
8811
8812test_loopback_exit:
8813 bp->link_params.loopback_mode = LOOPBACK_NONE;
8814
8815 return rc;
8816}
8817
8818static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8819{
8820 int rc = 0;
8821
8822 if (!netif_running(bp->dev))
8823 return BNX2X_LOOPBACK_FAILED;
8824
f8ef6e44 8825 bnx2x_netif_stop(bp, 1);
8826
8827 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8828 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8829 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8830 }
8831
8832 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8833 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8834 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8835 }
8836
8837 bnx2x_netif_start(bp);
8838
8839 return rc;
8840}
8841
8842#define CRC32_RESIDUAL 0xdebb20e3
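/* 0xdebb20e3 is the standard residual of the reflected CRC32 used by
 * ether_crc_le(): running the CRC over a block that ends with its own
 * little-endian CRC value always yields this constant.  That is what
 * lets bnx2x_test_nvram() below validate whole NVRAM sections without
 * knowing where inside each section the stored CRC sits.
 */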
8843
8844static int bnx2x_test_nvram(struct bnx2x *bp)
8845{
8846 static const struct {
8847 int offset;
8848 int size;
8849 } nvram_tbl[] = {
8850 { 0, 0x14 }, /* bootstrap */
8851 { 0x14, 0xec }, /* dir */
8852 { 0x100, 0x350 }, /* manuf_info */
8853 { 0x450, 0xf0 }, /* feature_info */
8854 { 0x640, 0x64 }, /* upgrade_key_info */
8855 { 0x6a4, 0x64 },
8856 { 0x708, 0x70 }, /* manuf_key_info */
8857 { 0x778, 0x70 },
8858 { 0, 0 }
8859 };
8860 u32 buf[0x350 / 4];
8861 u8 *data = (u8 *)buf;
8862 int i, rc;
8863 u32 magic, csum;
8864
8865 rc = bnx2x_nvram_read(bp, 0, data, 4);
8866 if (rc) {
8867 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8868 goto test_nvram_exit;
8869 }
8870
8871 magic = be32_to_cpu(buf[0]);
8872 if (magic != 0x669955aa) {
8873 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8874 rc = -ENODEV;
8875 goto test_nvram_exit;
8876 }
8877
8878 for (i = 0; nvram_tbl[i].size; i++) {
8879
8880 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8881 nvram_tbl[i].size);
8882 if (rc) {
8883 DP(NETIF_MSG_PROBE,
8884 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8885 goto test_nvram_exit;
8886 }
8887
8888 csum = ether_crc_le(nvram_tbl[i].size, data);
8889 if (csum != CRC32_RESIDUAL) {
8890 DP(NETIF_MSG_PROBE,
8891 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8892 rc = -ENODEV;
8893 goto test_nvram_exit;
8894 }
8895 }
8896
8897test_nvram_exit:
8898 return rc;
8899}
8900
8901static int bnx2x_test_intr(struct bnx2x *bp)
8902{
8903 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8904 int i, rc;
8905
8906 if (!netif_running(bp->dev))
8907 return -ENODEV;
8908
8909 config->hdr.length_6b = 0;
8910 config->hdr.offset = 0;
8911 config->hdr.client_id = BP_CL_ID(bp);
8912 config->hdr.reserved1 = 0;
8913
8914 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8915 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8916 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8917 if (rc == 0) {
8918 bp->set_mac_pending++;
8919 for (i = 0; i < 10; i++) {
8920 if (!bp->set_mac_pending)
8921 break;
8922 msleep_interruptible(10);
8923 }
8924 if (i == 10)
8925 rc = -ENODEV;
8926 }
8927
8928 return rc;
8929}
8930
8931static void bnx2x_self_test(struct net_device *dev,
8932 struct ethtool_test *etest, u64 *buf)
8933{
8934 struct bnx2x *bp = netdev_priv(dev);
8935
8936 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8937
f3c87cdd 8938 if (!netif_running(dev))
a2fbb9ea 8939 return;
a2fbb9ea 8940
33471629 8941 /* offline tests are not supported in MF mode */
8942 if (IS_E1HMF(bp))
8943 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8944
8945 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8946 u8 link_up;
8947
8948 link_up = bp->link_vars.link_up;
8949 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8950 bnx2x_nic_load(bp, LOAD_DIAG);
8951 /* wait until link state is restored */
8952 bnx2x_wait_for_link(bp, link_up);
8953
8954 if (bnx2x_test_registers(bp) != 0) {
8955 buf[0] = 1;
8956 etest->flags |= ETH_TEST_FL_FAILED;
8957 }
8958 if (bnx2x_test_memory(bp) != 0) {
8959 buf[1] = 1;
8960 etest->flags |= ETH_TEST_FL_FAILED;
8961 }
8962 buf[2] = bnx2x_test_loopback(bp, link_up);
8963 if (buf[2] != 0)
8964 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 8965
8966 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8967 bnx2x_nic_load(bp, LOAD_NORMAL);
8968 /* wait until link state is restored */
8969 bnx2x_wait_for_link(bp, link_up);
8970 }
8971 if (bnx2x_test_nvram(bp) != 0) {
8972 buf[3] = 1;
8973 etest->flags |= ETH_TEST_FL_FAILED;
8974 }
8975 if (bnx2x_test_intr(bp) != 0) {
8976 buf[4] = 1;
8977 etest->flags |= ETH_TEST_FL_FAILED;
8978 }
8979 if (bp->port.pmf)
8980 if (bnx2x_link_test(bp) != 0) {
8981 buf[5] = 1;
8982 etest->flags |= ETH_TEST_FL_FAILED;
8983 }
8984 buf[7] = bnx2x_mc_assert(bp);
8985 if (buf[7] != 0)
8986 etest->flags |= ETH_TEST_FL_FAILED;
8987
8988#ifdef BNX2X_EXTRA_DEBUG
8989 bnx2x_panic_dump(bp);
8990#endif
8991}
8992
8993static const struct {
8994 long offset;
8995 int size;
8996 u32 flags;
8997#define STATS_FLAGS_PORT 1
8998#define STATS_FLAGS_FUNC 2
8999 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 9000} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9001/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9002 8, STATS_FLAGS_FUNC, "rx_bytes" },
9003 { STATS_OFFSET32(error_bytes_received_hi),
9004 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9005 { STATS_OFFSET32(total_bytes_transmitted_hi),
9006 8, STATS_FLAGS_FUNC, "tx_bytes" },
9007 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9008 8, STATS_FLAGS_PORT, "tx_error_bytes" },
bb2a0f7a 9009 { STATS_OFFSET32(total_unicast_packets_received_hi),
66e855f3 9010 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
bb2a0f7a 9011 { STATS_OFFSET32(total_multicast_packets_received_hi),
66e855f3 9012 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
bb2a0f7a 9013 { STATS_OFFSET32(total_broadcast_packets_received_hi),
66e855f3 9014 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
bb2a0f7a 9015 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
66e855f3 9016 8, STATS_FLAGS_FUNC, "tx_packets" },
bb2a0f7a 9017 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
66e855f3 9018 8, STATS_FLAGS_PORT, "tx_mac_errors" },
bb2a0f7a 9019/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
66e855f3 9020 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9021 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9022 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9023 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9024 8, STATS_FLAGS_PORT, "rx_align_errors" },
bb2a0f7a 9025 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9026 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9027 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9028 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
bb2a0f7a 9029 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9030 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9031 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9032 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9033 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9034 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9035 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9036 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9037 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9038 8, STATS_FLAGS_PORT, "rx_fragments" },
9039/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9040 8, STATS_FLAGS_PORT, "rx_jabbers" },
bb2a0f7a 9041 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
66e855f3 9042 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
bb2a0f7a 9043 { STATS_OFFSET32(jabber_packets_received),
66e855f3 9044 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
bb2a0f7a 9045 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9046 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9047 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9048 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9049 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9050 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9051 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9052 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9053 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9054 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9055 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9056 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
bb2a0f7a 9057 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9058 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
bb2a0f7a 9059/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
66e855f3 9060 8, STATS_FLAGS_PORT, "rx_xon_frames" },
bb2a0f7a 9061 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9062 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9063 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9064 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9065 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9066 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
bb2a0f7a 9067 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9068 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9069 { STATS_OFFSET32(mac_filter_discard),
9070 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9071 { STATS_OFFSET32(no_buff_discard),
9072 4, STATS_FLAGS_FUNC, "rx_discards" },
9073 { STATS_OFFSET32(xxoverflow_discard),
9074 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9075 { STATS_OFFSET32(brb_drop_hi),
9076 8, STATS_FLAGS_PORT, "brb_discard" },
9077 { STATS_OFFSET32(brb_truncate_hi),
9078 8, STATS_FLAGS_PORT, "brb_truncate" },
9079/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9080 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9081 { STATS_OFFSET32(rx_skb_alloc_failed),
9082 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9083/* 42 */{ STATS_OFFSET32(hw_csum_err),
9084 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9085};
9086
9087#define IS_NOT_E1HMF_STAT(bp, i) \
9088 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
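/* In E1H multi-function mode the MAC/port counters are shared by the
 * functions on a port, so entries flagged STATS_FLAGS_PORT are skipped
 * in the ethtool callbacks below and only per-function
 * (STATS_FLAGS_FUNC) counters are reported.
 */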
9089
9090static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9091{
9092 struct bnx2x *bp = netdev_priv(dev);
9093 int i, j;
9094
9095 switch (stringset) {
9096 case ETH_SS_STATS:
bb2a0f7a 9097 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9098 if (IS_NOT_E1HMF_STAT(bp, i))
9099 continue;
9100 strcpy(buf + j*ETH_GSTRING_LEN,
9101 bnx2x_stats_arr[i].string);
9102 j++;
9103 }
9104 break;
9105
9106 case ETH_SS_TEST:
9107 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9108 break;
9109 }
9110}
9111
9112static int bnx2x_get_stats_count(struct net_device *dev)
9113{
9114 struct bnx2x *bp = netdev_priv(dev);
9115 int i, num_stats = 0;
9116
9117 for (i = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9118 if (IS_NOT_E1HMF_STAT(bp, i))
9119 continue;
9120 num_stats++;
9121 }
9122 return num_stats;
9123}
9124
9125static void bnx2x_get_ethtool_stats(struct net_device *dev,
9126 struct ethtool_stats *stats, u64 *buf)
9127{
9128 struct bnx2x *bp = netdev_priv(dev);
9129 u32 *hw_stats = (u32 *)&bp->eth_stats;
9130 int i, j;
a2fbb9ea 9131
bb2a0f7a 9132 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9133 if (IS_NOT_E1HMF_STAT(bp, i))
a2fbb9ea 9134 continue;
9135
9136 if (bnx2x_stats_arr[i].size == 0) {
9137 /* skip this counter */
9138 buf[j] = 0;
9139 j++;
9140 continue;
9141 }
bb2a0f7a 9142 if (bnx2x_stats_arr[i].size == 4) {
a2fbb9ea 9143 /* 4-byte counter */
9144 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9145 j++;
9146 continue;
9147 }
9148 /* 8-byte counter */
9149 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9150 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9151 j++;
9152 }
9153}
9154
9155static int bnx2x_phys_id(struct net_device *dev, u32 data)
9156{
9157 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9158 int port = BP_PORT(bp);
9159 int i;
9160
9161 if (!netif_running(dev))
9162 return 0;
9163
9164 if (!bp->port.pmf)
9165 return 0;
9166
9167 if (data == 0)
9168 data = 2;
9169
9170 for (i = 0; i < (data * 2); i++) {
c18487ee 9171 if ((i % 2) == 0)
34f80b04 9172 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9173 bp->link_params.hw_led_mode,
9174 bp->link_params.chip_id);
9175 else
34f80b04 9176 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9177 bp->link_params.hw_led_mode,
9178 bp->link_params.chip_id);
9179
9180 msleep_interruptible(500);
9181 if (signal_pending(current))
9182 break;
9183 }
9184
c18487ee 9185 if (bp->link_vars.link_up)
34f80b04 9186 bnx2x_set_led(bp, port, LED_MODE_OPER,
9187 bp->link_vars.line_speed,
9188 bp->link_params.hw_led_mode,
9189 bp->link_params.chip_id);
9190
9191 return 0;
9192}
9193
9194static struct ethtool_ops bnx2x_ethtool_ops = {
9195 .get_settings = bnx2x_get_settings,
9196 .set_settings = bnx2x_set_settings,
9197 .get_drvinfo = bnx2x_get_drvinfo,
9198 .get_wol = bnx2x_get_wol,
9199 .set_wol = bnx2x_set_wol,
9200 .get_msglevel = bnx2x_get_msglevel,
9201 .set_msglevel = bnx2x_set_msglevel,
9202 .nway_reset = bnx2x_nway_reset,
9203 .get_link = ethtool_op_get_link,
9204 .get_eeprom_len = bnx2x_get_eeprom_len,
9205 .get_eeprom = bnx2x_get_eeprom,
9206 .set_eeprom = bnx2x_set_eeprom,
9207 .get_coalesce = bnx2x_get_coalesce,
9208 .set_coalesce = bnx2x_set_coalesce,
9209 .get_ringparam = bnx2x_get_ringparam,
9210 .set_ringparam = bnx2x_set_ringparam,
9211 .get_pauseparam = bnx2x_get_pauseparam,
9212 .set_pauseparam = bnx2x_set_pauseparam,
9213 .get_rx_csum = bnx2x_get_rx_csum,
9214 .set_rx_csum = bnx2x_set_rx_csum,
9215 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9216 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9217 .set_flags = bnx2x_set_flags,
9218 .get_flags = ethtool_op_get_flags,
9219 .get_sg = ethtool_op_get_sg,
9220 .set_sg = ethtool_op_set_sg,
9221 .get_tso = ethtool_op_get_tso,
9222 .set_tso = bnx2x_set_tso,
9223 .self_test_count = bnx2x_self_test_count,
9224 .self_test = bnx2x_self_test,
9225 .get_strings = bnx2x_get_strings,
9226 .phys_id = bnx2x_phys_id,
9227 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9228 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9229};
9230
9231/* end of ethtool_ops */
9232
9233/****************************************************************************
9234* General service functions
9235****************************************************************************/
9236
9237static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9238{
9239 u16 pmcsr;
9240
9241 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9242
9243 switch (state) {
9244 case PCI_D0:
34f80b04 9245 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9246 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9247 PCI_PM_CTRL_PME_STATUS));
9248
9249 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 9250 /* delay required during transition out of D3hot */
a2fbb9ea 9251 msleep(20);
34f80b04 9252 break;
a2fbb9ea 9253
9254 case PCI_D3hot:
9255 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9256 pmcsr |= 3;
a2fbb9ea 9257
9258 if (bp->wol)
9259 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9260
9261 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9262 pmcsr);
a2fbb9ea 9263
9264 /* No more memory access after this point until
9265 * device is brought back to D0.
9266 */
9267 break;
9268
9269 default:
9270 return -EINVAL;
9271 }
9272 return 0;
9273}
9274
9275/*
9276 * net_device service functions
9277 */
9278
9279static int bnx2x_poll(struct napi_struct *napi, int budget)
9280{
9281 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9282 napi);
9283 struct bnx2x *bp = fp->bp;
9284 int work_done = 0;
2772f903 9285 u16 rx_cons_sb;
9286
9287#ifdef BNX2X_STOP_ON_ERROR
9288 if (unlikely(bp->panic))
34f80b04 9289 goto poll_panic;
9290#endif
9291
9292 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9293 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9294 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9295
9296 bnx2x_update_fpsb_idx(fp);
9297
da5a662a 9298 if (BNX2X_HAS_TX_WORK(fp))
9299 bnx2x_tx_int(fp, budget);
9300
9301 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9302 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9303 rx_cons_sb++;
da5a662a 9304 if (BNX2X_HAS_RX_WORK(fp))
9305 work_done = bnx2x_rx_int(fp, budget);
9306
da5a662a 9307 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9308 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9309 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9310 rx_cons_sb++;
9311
9312 /* must not complete if we consumed full budget */
da5a662a 9313 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9314
9315#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9316poll_panic:
a2fbb9ea 9317#endif
908a7a16 9318 netif_rx_complete(napi);
a2fbb9ea 9319
34f80b04 9320 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9321 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9322 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9323 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9324 }
9325 return work_done;
9326}
9327
9328
9329/* we split the first BD into headers and data BDs
33471629 9330 * to ease the pain of our fellow microcode engineers
9331 * we use one mapping for both BDs
9332 * So far this has only been observed to happen
9333 * in Other Operating Systems(TM)
9334 */
9335static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9336 struct bnx2x_fastpath *fp,
9337 struct eth_tx_bd **tx_bd, u16 hlen,
9338 u16 bd_prod, int nbd)
9339{
9340 struct eth_tx_bd *h_tx_bd = *tx_bd;
9341 struct eth_tx_bd *d_tx_bd;
9342 dma_addr_t mapping;
9343 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9344
9345 /* first fix first BD */
9346 h_tx_bd->nbd = cpu_to_le16(nbd);
9347 h_tx_bd->nbytes = cpu_to_le16(hlen);
9348
9349 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9350 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9351 h_tx_bd->addr_lo, h_tx_bd->nbd);
9352
9353 /* now get a new data BD
9354 * (after the pbd) and fill it */
9355 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9356 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9357
9358 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9359 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9360
9361 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9362 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9363 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9364 d_tx_bd->vlan = 0;
9365 /* this marks the BD as one that has no individual mapping
9366 * the FW ignores this flag in a BD not marked start
9367 */
9368 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9369 DP(NETIF_MSG_TX_QUEUED,
9370 "TSO split data size is %d (%x:%x)\n",
9371 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9372
9373 /* update tx_bd for marking the last BD flag */
9374 *tx_bd = d_tx_bd;
9375
9376 return bd_prod;
9377}
9378
9379static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9380{
9381 if (fix > 0)
9382 csum = (u16) ~csum_fold(csum_sub(csum,
9383 csum_partial(t_header - fix, fix, 0)));
9384
9385 else if (fix < 0)
9386 csum = (u16) ~csum_fold(csum_add(csum,
9387 csum_partial(t_header, -fix, 0)));
9388
9389 return swab16(csum);
9390}
9391
9392static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9393{
9394 u32 rc;
9395
9396 if (skb->ip_summed != CHECKSUM_PARTIAL)
9397 rc = XMIT_PLAIN;
9398
9399 else {
9400 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9401 rc = XMIT_CSUM_V6;
9402 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9403 rc |= XMIT_CSUM_TCP;
9404
9405 } else {
9406 rc = XMIT_CSUM_V4;
9407 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9408 rc |= XMIT_CSUM_TCP;
9409 }
9410 }
9411
9412 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9413 rc |= XMIT_GSO_V4;
9414
9415 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9416 rc |= XMIT_GSO_V6;
9417
9418 return rc;
9419}
9420
9421/* check if packet requires linearization (packet is too fragmented) */
9422static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9423 u32 xmit_type)
9424{
9425 int to_copy = 0;
9426 int hlen = 0;
9427 int first_bd_sz = 0;
9428
9429 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9430 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9431
9432 if (xmit_type & XMIT_GSO) {
9433 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9434 /* Check if LSO packet needs to be copied:
9435 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9436 int wnd_size = MAX_FETCH_BD - 3;
33471629 9437 /* Number of windows to check */
9438 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9439 int wnd_idx = 0;
9440 int frag_idx = 0;
9441 u32 wnd_sum = 0;
9442
9443 /* Headers length */
9444 hlen = (int)(skb_transport_header(skb) - skb->data) +
9445 tcp_hdrlen(skb);
9446
9447 /* Amount of data (w/o headers) on linear part of SKB*/
9448 first_bd_sz = skb_headlen(skb) - hlen;
9449
9450 wnd_sum = first_bd_sz;
9451
9452 /* Calculate the first sum - it's special */
9453 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9454 wnd_sum +=
9455 skb_shinfo(skb)->frags[frag_idx].size;
9456
 9457 			/* If there was data in the linear part of the skb - check it */
9458 if (first_bd_sz > 0) {
9459 if (unlikely(wnd_sum < lso_mss)) {
9460 to_copy = 1;
9461 goto exit_lbl;
9462 }
9463
9464 wnd_sum -= first_bd_sz;
9465 }
9466
9467 /* Others are easier: run through the frag list and
9468 check all windows */
9469 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9470 wnd_sum +=
9471 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9472
9473 if (unlikely(wnd_sum < lso_mss)) {
9474 to_copy = 1;
9475 break;
9476 }
9477 wnd_sum -=
9478 skb_shinfo(skb)->frags[wnd_idx].size;
9479 }
9480
9481 } else {
 9482 			/* in the non-LSO case, a too fragmented packet should
 9483 			   always be linearized */
9484 to_copy = 1;
9485 }
9486 }
9487
9488exit_lbl:
9489 if (unlikely(to_copy))
9490 DP(NETIF_MSG_TX_QUEUED,
9491 "Linearization IS REQUIRED for %s packet. "
9492 "num_frags %d hlen %d first_bd_sz %d\n",
9493 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9494 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9495
9496 return to_copy;
9497}
9498
9499/* called with netif_tx_lock
a2fbb9ea 9500 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 9501 * netif_wake_queue()
9502 */
9503static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9504{
9505 struct bnx2x *bp = netdev_priv(dev);
9506 struct bnx2x_fastpath *fp;
9507 struct sw_tx_bd *tx_buf;
9508 struct eth_tx_bd *tx_bd;
9509 struct eth_tx_parse_bd *pbd = NULL;
9510 u16 pkt_prod, bd_prod;
755735eb 9511 int nbd, fp_index;
a2fbb9ea 9512 dma_addr_t mapping;
755735eb
EG
9513 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9514 int vlan_off = (bp->e1hov ? 4 : 0);
9515 int i;
9516 u8 hlen = 0;
9517
9518#ifdef BNX2X_STOP_ON_ERROR
9519 if (unlikely(bp->panic))
9520 return NETDEV_TX_BUSY;
9521#endif
9522
755735eb 9523 fp_index = (smp_processor_id() % bp->num_queues);
a2fbb9ea 9524 fp = &bp->fp[fp_index];
755735eb 9525
231fd58a 9526 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
bb2a0f7a 9527 		bp->eth_stats.driver_xoff++;
9528 netif_stop_queue(dev);
9529 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9530 return NETDEV_TX_BUSY;
9531 }
9532
9533 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9534 " gso type %x xmit_type %x\n",
9535 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9536 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9537
33471629 9538 /* First, check if we need to linearize the skb
9539 (due to FW restrictions) */
9540 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9541 /* Statistics of linearization */
9542 bp->lin_cnt++;
9543 if (skb_linearize(skb) != 0) {
9544 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9545 "silently dropping this SKB\n");
9546 dev_kfree_skb_any(skb);
da5a662a 9547 return NETDEV_TX_OK;
9548 }
9549 }
9550
a2fbb9ea 9551 /*
755735eb 9552 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 9553 then for TSO or xsum we have a parsing info BD,
755735eb 9554 and only then we have the rest of the TSO BDs.
9555 (don't forget to mark the last one as last,
9556 and to unmap only AFTER you write to the BD ...)
755735eb 9557 	   And above all, all pbd sizes are in words - NOT DWORDS!
9558 */
9559
9560 pkt_prod = fp->tx_pkt_prod++;
755735eb 9561 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 9562
755735eb 9563 /* get a tx_buf and first BD */
9564 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9565 tx_bd = &fp->tx_desc_ring[bd_prod];
9566
9567 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9568 tx_bd->general_data = (UNICAST_ADDRESS <<
9569 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9570 /* header nbd */
9571 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 9572
9573 /* remember the first BD of the packet */
9574 tx_buf->first_bd = fp->tx_bd_prod;
9575 tx_buf->skb = skb;
9576
9577 DP(NETIF_MSG_TX_QUEUED,
9578 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9579 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9580
9581#ifdef BCM_VLAN
9582 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9583 (bp->flags & HW_VLAN_TX_FLAG)) {
9584 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9585 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9586 vlan_off += 4;
9587 } else
0c6671b0 9588#endif
755735eb 9589 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 9590
755735eb 9591 if (xmit_type) {
755735eb 9592 /* turn on parsing and get a BD */
9593 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9594 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9595
9596 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9597 }
9598
9599 if (xmit_type & XMIT_CSUM) {
9600 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9601
9602 /* for now NS flag is not used in Linux */
755735eb 9603 pbd->global_data = (hlen |
96fc1784 9604 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 9605 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 9606
9607 pbd->ip_hlen = (skb_transport_header(skb) -
9608 skb_network_header(skb)) / 2;
9609
9610 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 9611
9612 pbd->total_hlen = cpu_to_le16(hlen);
9613 hlen = hlen*2 - vlan_off;
a2fbb9ea 9614
9615 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9616
9617 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 9618 tx_bd->bd_flags.as_bitfield |=
9619 ETH_TX_BD_FLAGS_IP_CSUM;
9620 else
9621 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9622
9623 if (xmit_type & XMIT_CSUM_TCP) {
9624 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9625
9626 } else {
9627 s8 fix = SKB_CS_OFF(skb); /* signed! */
9628
a2fbb9ea 9629 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 9630 pbd->cs_offset = fix / 2;
a2fbb9ea 9631
9632 DP(NETIF_MSG_TX_QUEUED,
9633 "hlen %d offset %d fix %d csum before fix %x\n",
9634 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9635 SKB_CS(skb));
9636
9637 /* HW bug: fixup the CSUM */
9638 pbd->tcp_pseudo_csum =
9639 bnx2x_csum_fix(skb_transport_header(skb),
9640 SKB_CS(skb), fix);
9641
9642 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9643 pbd->tcp_pseudo_csum);
9644 }
9645 }
9646
9647 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 9648 skb_headlen(skb), PCI_DMA_TODEVICE);
9649
9650 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9651 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 9652 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9653 tx_bd->nbd = cpu_to_le16(nbd);
9654 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9655
9656 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9657 " nbytes %d flags %x vlan %x\n",
9658 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9659 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9660 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 9661
755735eb 9662 if (xmit_type & XMIT_GSO) {
9663
9664 DP(NETIF_MSG_TX_QUEUED,
9665 "TSO packet len %d hlen %d total len %d tso size %d\n",
9666 skb->len, hlen, skb_headlen(skb),
9667 skb_shinfo(skb)->gso_size);
9668
9669 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9670
9671 if (unlikely(skb_headlen(skb) > hlen))
9672 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9673 bd_prod, ++nbd);
9674
9675 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9676 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9677 pbd->tcp_flags = pbd_tcp_flags(skb);
9678
9679 if (xmit_type & XMIT_GSO_V4) {
9680 pbd->ip_id = swab16(ip_hdr(skb)->id);
9681 pbd->tcp_pseudo_csum =
9682 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9683 ip_hdr(skb)->daddr,
9684 0, IPPROTO_TCP, 0));
9685
9686 } else
9687 pbd->tcp_pseudo_csum =
9688 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9689 &ipv6_hdr(skb)->daddr,
9690 0, IPPROTO_TCP, 0));
9691
9692 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9693 }
9694
9695 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9696 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 9697
9698 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9699 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 9700
9701 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9702 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 9703
9704 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9705 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9706 tx_bd->nbytes = cpu_to_le16(frag->size);
9707 tx_bd->vlan = cpu_to_le16(pkt_prod);
9708 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 9709
9710 DP(NETIF_MSG_TX_QUEUED,
9711 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9712 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9713 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9714 }
9715
755735eb 9716 /* now at last mark the BD as the last BD */
9717 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9718
9719 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9720 tx_bd, tx_bd->bd_flags.as_bitfield);
9721
9722 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9723
755735eb 9724 /* now send a tx doorbell, counting the next BD
9725 * if the packet contains or ends with it
9726 */
9727 if (TX_BD_POFF(bd_prod) < nbd)
9728 nbd++;
9729
9730 if (pbd)
9731 DP(NETIF_MSG_TX_QUEUED,
9732 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9733 " tcp_flags %x xsum %x seq %u hlen %u\n",
9734 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9735 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 9736 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 9737
755735eb 9738 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 9739
9740 /*
9741 * Make sure that the BD data is updated before updating the producer
9742 * since FW might read the BD right after the producer is updated.
9743 * This is only applicable for weak-ordered memory model archs such
 9744 	 * as IA-64. The following barrier is also mandatory since the FW
 9745 	 * assumes packets always have BDs.
9746 */
9747 wmb();
9748
9749 fp->hw_tx_prods->bds_prod =
9750 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 9751 mb(); /* FW restriction: must not reorder writing nbd and packets */
9752 fp->hw_tx_prods->packets_prod =
9753 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 9754 DOORBELL(bp, FP_IDX(fp), 0);
9755
9756 mmiowb();
9757
755735eb 9758 fp->tx_bd_prod += nbd;
9759 dev->trans_start = jiffies;
9760
9761 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9762 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9763 if we put Tx into XOFF state. */
9764 smp_mb();
a2fbb9ea 9765 netif_stop_queue(dev);
bb2a0f7a 9766 bp->eth_stats.driver_xoff++;
9767 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9768 netif_wake_queue(dev);
9769 }
9770 fp->tx_pkt++;
9771
9772 return NETDEV_TX_OK;
9773}
9774
bb2a0f7a 9775/* called with rtnl_lock */
9776static int bnx2x_open(struct net_device *dev)
9777{
9778 struct bnx2x *bp = netdev_priv(dev);
9779
9780 bnx2x_set_power_state(bp, PCI_D0);
9781
bb2a0f7a 9782 return bnx2x_nic_load(bp, LOAD_OPEN);
9783}
9784
bb2a0f7a 9785/* called with rtnl_lock */
9786static int bnx2x_close(struct net_device *dev)
9787{
9788 struct bnx2x *bp = netdev_priv(dev);
9789
9790 /* Unload the driver, release IRQs */
9791 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9792 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9793 if (!CHIP_REV_IS_SLOW(bp))
9794 bnx2x_set_power_state(bp, PCI_D3hot);
9795
9796 return 0;
9797}
9798
9799/* called with netif_tx_lock from set_multicast */
9800static void bnx2x_set_rx_mode(struct net_device *dev)
9801{
9802 struct bnx2x *bp = netdev_priv(dev);
9803 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9804 int port = BP_PORT(bp);
9805
9806 if (bp->state != BNX2X_STATE_OPEN) {
9807 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9808 return;
9809 }
9810
9811 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9812
9813 if (dev->flags & IFF_PROMISC)
9814 rx_mode = BNX2X_RX_MODE_PROMISC;
9815
9816 else if ((dev->flags & IFF_ALLMULTI) ||
9817 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9818 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9819
9820 else { /* some multicasts */
9821 if (CHIP_IS_E1(bp)) {
9822 int i, old, offset;
9823 struct dev_mc_list *mclist;
9824 struct mac_configuration_cmd *config =
9825 bnx2x_sp(bp, mcast_config);
9826
9827 for (i = 0, mclist = dev->mc_list;
9828 mclist && (i < dev->mc_count);
9829 i++, mclist = mclist->next) {
9830
9831 config->config_table[i].
9832 cam_entry.msb_mac_addr =
9833 swab16(*(u16 *)&mclist->dmi_addr[0]);
9834 config->config_table[i].
9835 cam_entry.middle_mac_addr =
9836 swab16(*(u16 *)&mclist->dmi_addr[2]);
9837 config->config_table[i].
9838 cam_entry.lsb_mac_addr =
9839 swab16(*(u16 *)&mclist->dmi_addr[4]);
9840 config->config_table[i].cam_entry.flags =
9841 cpu_to_le16(port);
9842 config->config_table[i].
9843 target_table_entry.flags = 0;
9844 config->config_table[i].
9845 target_table_entry.client_id = 0;
9846 config->config_table[i].
9847 target_table_entry.vlan_id = 0;
9848
9849 DP(NETIF_MSG_IFUP,
9850 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9851 config->config_table[i].
9852 cam_entry.msb_mac_addr,
9853 config->config_table[i].
9854 cam_entry.middle_mac_addr,
9855 config->config_table[i].
9856 cam_entry.lsb_mac_addr);
9857 }
9858 old = config->hdr.length_6b;
9859 if (old > i) {
9860 for (; i < old; i++) {
9861 if (CAM_IS_INVALID(config->
9862 config_table[i])) {
9863 i--; /* already invalidated */
9864 break;
9865 }
9866 /* invalidate */
9867 CAM_INVALIDATE(config->
9868 config_table[i]);
9869 }
9870 }
9871
9872 if (CHIP_REV_IS_SLOW(bp))
9873 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9874 else
9875 offset = BNX2X_MAX_MULTICAST*(1 + port);
9876
9877 config->hdr.length_6b = i;
9878 config->hdr.offset = offset;
9879 config->hdr.client_id = BP_CL_ID(bp);
9880 config->hdr.reserved1 = 0;
9881
9882 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9883 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9884 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9885 0);
9886 } else { /* E1H */
9887 /* Accept one or more multicasts */
9888 struct dev_mc_list *mclist;
9889 u32 mc_filter[MC_HASH_SIZE];
9890 u32 crc, bit, regidx;
9891 int i;
9892
9893 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9894
9895 for (i = 0, mclist = dev->mc_list;
9896 mclist && (i < dev->mc_count);
9897 i++, mclist = mclist->next) {
9898
9899 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9900 mclist->dmi_addr);
9901
9902 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9903 bit = (crc >> 24) & 0xff;
9904 regidx = bit >> 5;
9905 bit &= 0x1f;
9906 mc_filter[regidx] |= (1 << bit);
9907 }
9908
9909 for (i = 0; i < MC_HASH_SIZE; i++)
9910 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9911 mc_filter[i]);
9912 }
9913 }
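/* The E1H branch above approximates multicast filtering with a 256-bit
 * hash: the top byte of the CRC32c of each address selects one bit
 * spread over the MC_HASH registers (bit >> 5 picks the 32-bit
 * register, eight of them by the 256/32 arithmetic, and the low five
 * bits the position).  Hash collisions let unwanted multicasts
 * through; those are left for the stack to drop.
 */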
9914
9915 bp->rx_mode = rx_mode;
9916 bnx2x_set_storm_rx_mode(bp);
9917}
9918
9919/* called with rtnl_lock */
9920static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9921{
9922 struct sockaddr *addr = p;
9923 struct bnx2x *bp = netdev_priv(dev);
9924
34f80b04 9925 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9926 return -EINVAL;
9927
9928 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9929 if (netif_running(dev)) {
9930 if (CHIP_IS_E1(bp))
3101c2bc 9931 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 9932 else
3101c2bc 9933 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 9934 }
9935
9936 return 0;
9937}
9938
c18487ee 9939/* called with rtnl_lock */
9940static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9941{
9942 struct mii_ioctl_data *data = if_mii(ifr);
9943 struct bnx2x *bp = netdev_priv(dev);
3196a88a 9944 int port = BP_PORT(bp);
9945 int err;
9946
9947 switch (cmd) {
9948 case SIOCGMIIPHY:
34f80b04 9949 data->phy_id = bp->port.phy_addr;
a2fbb9ea 9950
c14423fe 9951 /* fallthrough */
c18487ee 9952
a2fbb9ea 9953 case SIOCGMIIREG: {
c18487ee 9954 u16 mii_regval;
a2fbb9ea 9955
9956 if (!netif_running(dev))
9957 return -EAGAIN;
a2fbb9ea 9958
34f80b04 9959 mutex_lock(&bp->port.phy_mutex);
3196a88a 9960 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9961 DEFAULT_PHY_DEV_ADDR,
9962 (data->reg_num & 0x1f), &mii_regval);
9963 data->val_out = mii_regval;
34f80b04 9964 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
9965 return err;
9966 }
9967
9968 case SIOCSMIIREG:
9969 if (!capable(CAP_NET_ADMIN))
9970 return -EPERM;
9971
9972 if (!netif_running(dev))
9973 return -EAGAIN;
9974
34f80b04 9975 mutex_lock(&bp->port.phy_mutex);
3196a88a 9976 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9977 DEFAULT_PHY_DEV_ADDR,
9978 (data->reg_num & 0x1f), data->val_in);
34f80b04 9979 mutex_unlock(&bp->port.phy_mutex);
9980 return err;
9981
9982 default:
9983 /* do nothing */
9984 break;
9985 }
9986
9987 return -EOPNOTSUPP;
9988}
9989
34f80b04 9990/* called with rtnl_lock */
9991static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9992{
9993 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9994 int rc = 0;
9995
9996 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9997 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9998 return -EINVAL;
9999
10000 /* This does not race with packet allocation
c14423fe 10001 * because the actual alloc size is
10002 * only updated as part of load
10003 */
10004 dev->mtu = new_mtu;
10005
10006 if (netif_running(dev)) {
10007 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10008 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10009 }
10010
10011 return rc;
10012}
10013
10014static void bnx2x_tx_timeout(struct net_device *dev)
10015{
10016 struct bnx2x *bp = netdev_priv(dev);
10017
10018#ifdef BNX2X_STOP_ON_ERROR
10019 if (!bp->panic)
10020 bnx2x_panic();
10021#endif
10022 /* This allows the netif to be shutdown gracefully before resetting */
10023 schedule_work(&bp->reset_task);
10024}
10025
10026#ifdef BCM_VLAN
34f80b04 10027/* called with rtnl_lock */
10028static void bnx2x_vlan_rx_register(struct net_device *dev,
10029 struct vlan_group *vlgrp)
10030{
10031 struct bnx2x *bp = netdev_priv(dev);
10032
10033 bp->vlgrp = vlgrp;
10034
10035 /* Set flags according to the required capabilities */
10036 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10037
10038 if (dev->features & NETIF_F_HW_VLAN_TX)
10039 bp->flags |= HW_VLAN_TX_FLAG;
10040
10041 if (dev->features & NETIF_F_HW_VLAN_RX)
10042 bp->flags |= HW_VLAN_RX_FLAG;
10043
a2fbb9ea 10044 if (netif_running(dev))
49d66772 10045 bnx2x_set_client_config(bp);
a2fbb9ea 10046}
34f80b04 10047
10048#endif
10049
10050#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10051static void poll_bnx2x(struct net_device *dev)
10052{
10053 struct bnx2x *bp = netdev_priv(dev);
10054
10055 disable_irq(bp->pdev->irq);
10056 bnx2x_interrupt(bp->pdev->irq, dev);
10057 enable_irq(bp->pdev->irq);
10058}
10059#endif
10060
10061static const struct net_device_ops bnx2x_netdev_ops = {
10062 .ndo_open = bnx2x_open,
10063 .ndo_stop = bnx2x_close,
10064 .ndo_start_xmit = bnx2x_start_xmit,
10065 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10066 .ndo_set_mac_address = bnx2x_change_mac_addr,
10067 .ndo_validate_addr = eth_validate_addr,
10068 .ndo_do_ioctl = bnx2x_ioctl,
10069 .ndo_change_mtu = bnx2x_change_mtu,
10070 .ndo_tx_timeout = bnx2x_tx_timeout,
10071#ifdef BCM_VLAN
10072 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10073#endif
10074#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10075 .ndo_poll_controller = poll_bnx2x,
10076#endif
10077};
10078
10079
10080static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10081 struct net_device *dev)
10082{
10083 struct bnx2x *bp;
10084 int rc;
10085
10086 SET_NETDEV_DEV(dev, &pdev->dev);
10087 bp = netdev_priv(dev);
10088
10089 bp->dev = dev;
10090 bp->pdev = pdev;
a2fbb9ea 10091 bp->flags = 0;
34f80b04 10092 bp->func = PCI_FUNC(pdev->devfn);
10093
10094 rc = pci_enable_device(pdev);
10095 if (rc) {
10096 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10097 goto err_out;
10098 }
10099
10100 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10101 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10102 " aborting\n");
10103 rc = -ENODEV;
10104 goto err_out_disable;
10105 }
10106
10107 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10108 printk(KERN_ERR PFX "Cannot find second PCI device"
10109 " base address, aborting\n");
10110 rc = -ENODEV;
10111 goto err_out_disable;
10112 }
10113
10114 if (atomic_read(&pdev->enable_cnt) == 1) {
10115 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10116 if (rc) {
10117 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10118 " aborting\n");
10119 goto err_out_disable;
10120 }
a2fbb9ea 10121
10122 pci_set_master(pdev);
10123 pci_save_state(pdev);
10124 }
10125
10126 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10127 if (bp->pm_cap == 0) {
10128 printk(KERN_ERR PFX "Cannot find power management"
10129 " capability, aborting\n");
10130 rc = -EIO;
10131 goto err_out_release;
10132 }
10133
10134 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10135 if (bp->pcie_cap == 0) {
10136 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10137 " aborting\n");
10138 rc = -EIO;
10139 goto err_out_release;
10140 }
10141
10142 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10143 bp->flags |= USING_DAC_FLAG;
10144 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10145 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10146 " failed, aborting\n");
10147 rc = -EIO;
10148 goto err_out_release;
10149 }
10150
10151 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10152 printk(KERN_ERR PFX "System does not support DMA,"
10153 " aborting\n");
10154 rc = -EIO;
10155 goto err_out_release;
10156 }
10157
10158 dev->mem_start = pci_resource_start(pdev, 0);
10159 dev->base_addr = dev->mem_start;
10160 dev->mem_end = pci_resource_end(pdev, 0);
10161
10162 dev->irq = pdev->irq;
10163
275f165f 10164 bp->regview = pci_ioremap_bar(pdev, 0);
10165 if (!bp->regview) {
10166 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10167 rc = -ENOMEM;
10168 goto err_out_release;
10169 }
10170
10171 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10172 min_t(u64, BNX2X_DB_SIZE,
10173 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
10174 if (!bp->doorbells) {
10175 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10176 rc = -ENOMEM;
10177 goto err_out_unmap;
10178 }
10179
10180 bnx2x_set_power_state(bp, PCI_D0);
10181
10182 /* clean indirect addresses */
10183 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10184 PCICFG_VENDOR_ID_OFFSET);
10185 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10186 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10187 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10188 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10189
34f80b04 10190 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10191
c64213cd 10192 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 10193 dev->ethtool_ops = &bnx2x_ethtool_ops;
10194 dev->features |= NETIF_F_SG;
10195 dev->features |= NETIF_F_HW_CSUM;
10196 if (bp->flags & USING_DAC_FLAG)
10197 dev->features |= NETIF_F_HIGHDMA;
10198#ifdef BCM_VLAN
10199 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 10200 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10201#endif
10202 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10203 dev->features |= NETIF_F_TSO6;
10204
10205 return 0;
10206
10207err_out_unmap:
10208 if (bp->regview) {
10209 iounmap(bp->regview);
10210 bp->regview = NULL;
10211 }
10212 if (bp->doorbells) {
10213 iounmap(bp->doorbells);
10214 bp->doorbells = NULL;
10215 }
10216
10217err_out_release:
10218 if (atomic_read(&pdev->enable_cnt) == 1)
10219 pci_release_regions(pdev);
10220
10221err_out_disable:
10222 pci_disable_device(pdev);
10223 pci_set_drvdata(pdev, NULL);
10224
10225err_out:
10226 return rc;
10227}
10228
10229static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10230{
10231 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10232
10233 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10234 return val;
10235}
10236
10237/* return value of 1=2.5GHz 2=5GHz */
10238static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10239{
10240 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10241
10242 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10243 return val;
10244}
10245
10246static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10247 const struct pci_device_id *ent)
10248{
10249 static int version_printed;
10250 struct net_device *dev = NULL;
10251 struct bnx2x *bp;
25047950 10252 int rc;
10253
10254 if (version_printed++ == 0)
10255 printk(KERN_INFO "%s", version);
10256
10257 /* dev zeroed in init_etherdev */
10258 dev = alloc_etherdev(sizeof(*bp));
10259 if (!dev) {
10260 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10261 return -ENOMEM;
34f80b04 10262 }
a2fbb9ea 10263
10264 bp = netdev_priv(dev);
10265 bp->msglevel = debug;
10266
34f80b04 10267 rc = bnx2x_init_dev(pdev, dev);
10268 if (rc < 0) {
10269 free_netdev(dev);
10270 return rc;
10271 }
10272
10273 rc = register_netdev(dev);
10274 if (rc) {
c14423fe 10275 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04 10276 goto init_one_exit;
a2fbb9ea
ET
10277 }
10278
10279 pci_set_drvdata(pdev, dev);
10280
10281 rc = bnx2x_init_bp(bp);
10282 if (rc) {
10283 unregister_netdev(dev);
10284 goto init_one_exit;
10285 }
10286
10287 netif_carrier_off(dev);
10288
34f80b04 10289 bp->common.name = board_info[ent->driver_data].name;
25047950 10290 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10291 " IRQ %d, ", dev->name, bp->common.name,
10292 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
10293 bnx2x_get_pcie_width(bp),
10294 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10295 dev->base_addr, bp->pdev->irq);
e174961c 10296 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
a2fbb9ea 10297 return 0;
34f80b04
EG
10298
10299init_one_exit:
10300 if (bp->regview)
10301 iounmap(bp->regview);
10302
10303 if (bp->doorbells)
10304 iounmap(bp->doorbells);
10305
10306 free_netdev(dev);
10307
10308 if (atomic_read(&pdev->enable_cnt) == 1)
10309 pci_release_regions(pdev);
10310
10311 pci_disable_device(pdev);
10312 pci_set_drvdata(pdev, NULL);
10313
10314 return rc;
a2fbb9ea
ET
10315}
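
/*
 * Note on the unwind order above: init_one_exit releases resources in
 * essentially the reverse order of acquisition (BAR mappings, the net
 * device, PCI regions, then the device enable), and pci_release_regions()
 * is guarded by enable_cnt == 1 so the regions are only dropped by the
 * last user -- the same guard the teardown paths below apply.
 */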

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
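
/*
 * Suspend summary: the PCI config space is saved unconditionally, but
 * the NIC is quiesced (detach, bnx2x_nic_unload(), drop to the target
 * power state) only when the interface is actually up.  The whole
 * sequence runs under rtnl_lock() so the interface state cannot change
 * underneath it.
 */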

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
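
/*
 * Resume mirrors suspend step for step: restore config space, return
 * early if the interface was down, otherwise bring the chip back to
 * D0, reattach the net device and reload the NIC with LOAD_OPEN.
 */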

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length_6b; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
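
/*
 * This is a stripped-down variant of the regular unload path for use
 * after a fatal PCI error: the hardware is presumed unreachable, so no
 * firmware handshake is attempted -- the driver only stops software
 * activity, releases IRQs and frees host memory.  On E1 chips the
 * multicast CAM shadow is invalidated by hand, presumably because the
 * usual hardware update cannot be issued in this state.
 */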

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
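
/*
 * Recovery sketch: a live management CPU (MCP) advertises its shared
 * memory inside the [0xA0000, 0xC0000) window, so a base outside that
 * range means "no MCP" and the driver falls back to NO_MCP_FLAG
 * operation.  With the MCP present, the driver re-reads the mailbox
 * sequence number so its next firmware command stays in sync:
 *
 *	bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
 *		     & DRV_MSG_SEQ_NUMBER_MASK;
 */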

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
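
/*
 * EEH flow, for reference: the PCI error-recovery core calls
 * .error_detected first; returning PCI_ERS_RESULT_NEED_RESET asks it
 * to reset the slot and then invoke .slot_reset below, with .resume
 * called last once traffic may flow again.
 */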

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
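
/*
 * After a successful reset the device is back in a power-on state, so
 * slot_reset re-enables it, restores bus mastering and the saved
 * config space, and reports PCI_ERS_RESULT_RECOVERED; actually
 * reloading the NIC is deferred to .resume below.
 */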

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};
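
/*
 * Wiring these three callbacks into pci_error_handlers is what opts
 * the driver into PCI error recovery (EEH on powerpc, AER elsewhere);
 * without this table the device would typically just be taken offline
 * on a bus error instead of being reset and reloaded.
 */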

static struct pci_driver bnx2x_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2x_pci_tbl,
	.probe = bnx2x_init_one,
	.remove = __devexit_p(bnx2x_remove_one),
	.suspend = bnx2x_suspend,
	.resume = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}
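
/*
 * Ordering note: the workqueue is created before the driver registers
 * with the PCI core, since device probing can begin immediately and
 * may queue work; bnx2x_cleanup() below reverses this, destroying the
 * workqueue only after pci_unregister_driver() has torn down every
 * device.
 */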

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);