]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/bnx2x_main.c
bnx2x: Link order with external PHY
[net-next-2.6.git] / drivers / net / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
f1410647 3 * Copyright (c) 2007-2008 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
41#ifdef NETIF_F_HW_VLAN_TX
42 #include <linux/if_vlan.h>
a2fbb9ea
ET
43#endif
44#include <net/ip.h>
45#include <net/tcp.h>
46#include <net/checksum.h>
34f80b04
EG
47#include <linux/version.h>
48#include <net/ip6_checksum.h>
a2fbb9ea
ET
49#include <linux/workqueue.h>
50#include <linux/crc32.h>
34f80b04 51#include <linux/crc32c.h>
a2fbb9ea
ET
52#include <linux/prefetch.h>
53#include <linux/zlib.h>
a2fbb9ea
ET
54#include <linux/io.h>
55
56#include "bnx2x_reg.h"
57#include "bnx2x_fw_defs.h"
58#include "bnx2x_hsi.h"
c18487ee 59#include "bnx2x_link.h"
a2fbb9ea
ET
60#include "bnx2x.h"
61#include "bnx2x_init.h"
62
e35c3269
EG
63#define DRV_MODULE_VERSION "1.45.6"
64#define DRV_MODULE_RELDATE "2008/06/23"
34f80b04 65#define BNX2X_BC_VER 0x040200
a2fbb9ea 66
34f80b04
EG
67/* Time in jiffies before concluding the transmitter is hung */
68#define TX_TIMEOUT (5*HZ)
a2fbb9ea 69
53a10565 70static char version[] __devinitdata =
34f80b04 71 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
72 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
73
24e3fcef 74MODULE_AUTHOR("Eliezer Tamir");
a2fbb9ea
ET
75MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
76MODULE_LICENSE("GPL");
77MODULE_VERSION(DRV_MODULE_VERSION);
a2fbb9ea 78
19680c48 79static int disable_tpa;
a2fbb9ea
ET
80static int use_inta;
81static int poll;
a2fbb9ea 82static int debug;
34f80b04 83static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
a2fbb9ea
ET
84static int use_multi;
85
19680c48 86module_param(disable_tpa, int, 0);
a2fbb9ea
ET
87module_param(use_inta, int, 0);
88module_param(poll, int, 0);
a2fbb9ea 89module_param(debug, int, 0);
19680c48 90MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
a2fbb9ea
ET
91MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
92MODULE_PARM_DESC(poll, "use polling (for debug)");
c14423fe 93MODULE_PARM_DESC(debug, "default debug msglevel");
a2fbb9ea
ET
94
95#ifdef BNX2X_MULTI
96module_param(use_multi, int, 0);
97MODULE_PARM_DESC(use_multi, "use per-CPU queues");
98#endif
99
100enum bnx2x_board_type {
101 BCM57710 = 0,
34f80b04
EG
102 BCM57711 = 1,
103 BCM57711E = 2,
a2fbb9ea
ET
104};
105
34f80b04 106/* indexed by board_type, above */
53a10565 107static struct {
a2fbb9ea
ET
108 char *name;
109} board_info[] __devinitdata = {
34f80b04
EG
110 { "Broadcom NetXtreme II BCM57710 XGb" },
111 { "Broadcom NetXtreme II BCM57711 XGb" },
112 { "Broadcom NetXtreme II BCM57711E XGb" }
a2fbb9ea
ET
113};
114
34f80b04 115
a2fbb9ea
ET
116static const struct pci_device_id bnx2x_pci_tbl[] = {
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
34f80b04
EG
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
a2fbb9ea
ET
123 { 0 }
124};
125
126MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
127
128/****************************************************************************
129* General service functions
130****************************************************************************/
131
132/* used only at init
133 * locking is done by mcp
134 */
135static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
136{
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
138 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
139 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
140 PCICFG_VENDOR_ID_OFFSET);
141}
142
a2fbb9ea
ET
143static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
144{
145 u32 val;
146
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
148 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
149 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
150 PCICFG_VENDOR_ID_OFFSET);
151
152 return val;
153}
a2fbb9ea
ET
154
155static const u32 dmae_reg_go_c[] = {
156 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
157 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
158 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
159 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
160};
161
162/* copy command into DMAE command memory and set DMAE command go */
163static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
164 int idx)
165{
166 u32 cmd_offset;
167 int i;
168
169 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
170 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
171 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
172
ad8d3948
EG
173 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
174 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
175 }
176 REG_WR(bp, dmae_reg_go_c[idx], 1);
177}
178
ad8d3948
EG
179void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
180 u32 len32)
a2fbb9ea 181{
ad8d3948 182 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 183 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
184 int cnt = 200;
185
186 if (!bp->dmae_ready) {
187 u32 *data = bnx2x_sp(bp, wb_data[0]);
188
189 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
190 " using indirect\n", dst_addr, len32);
191 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
192 return;
193 }
194
195 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
196
197 memset(dmae, 0, sizeof(struct dmae_command));
198
199 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
200 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
201 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
202#ifdef __BIG_ENDIAN
203 DMAE_CMD_ENDIANITY_B_DW_SWAP |
204#else
205 DMAE_CMD_ENDIANITY_DW_SWAP |
206#endif
34f80b04
EG
207 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
208 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
209 dmae->src_addr_lo = U64_LO(dma_addr);
210 dmae->src_addr_hi = U64_HI(dma_addr);
211 dmae->dst_addr_lo = dst_addr >> 2;
212 dmae->dst_addr_hi = 0;
213 dmae->len = len32;
214 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
215 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 216 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 217
ad8d3948 218 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
a2fbb9ea
ET
219 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
220 "dst_addr [%x:%08x (%08x)]\n"
221 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
222 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
223 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
224 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
ad8d3948 225 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
226 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
227 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea
ET
228
229 *wb_comp = 0;
230
34f80b04 231 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
232
233 udelay(5);
ad8d3948
EG
234
235 while (*wb_comp != DMAE_COMP_VAL) {
236 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
237
ad8d3948 238 if (!cnt) {
a2fbb9ea
ET
239 BNX2X_ERR("dmae timeout!\n");
240 break;
241 }
ad8d3948 242 cnt--;
12469401
YG
243 /* adjust delay for emulation/FPGA */
244 if (CHIP_REV_IS_SLOW(bp))
245 msleep(100);
246 else
247 udelay(5);
a2fbb9ea 248 }
ad8d3948
EG
249
250 mutex_unlock(&bp->dmae_mutex);
a2fbb9ea
ET
251}
252
c18487ee 253void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 254{
ad8d3948 255 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 256 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
257 int cnt = 200;
258
259 if (!bp->dmae_ready) {
260 u32 *data = bnx2x_sp(bp, wb_data[0]);
261 int i;
262
263 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
264 " using indirect\n", src_addr, len32);
265 for (i = 0; i < len32; i++)
266 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
267 return;
268 }
269
270 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
271
272 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
273 memset(dmae, 0, sizeof(struct dmae_command));
274
275 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
276 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
277 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
278#ifdef __BIG_ENDIAN
279 DMAE_CMD_ENDIANITY_B_DW_SWAP |
280#else
281 DMAE_CMD_ENDIANITY_DW_SWAP |
282#endif
34f80b04
EG
283 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
284 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
285 dmae->src_addr_lo = src_addr >> 2;
286 dmae->src_addr_hi = 0;
287 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
288 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
289 dmae->len = len32;
290 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
291 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 292 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 293
ad8d3948 294 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
a2fbb9ea
ET
295 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
296 "dst_addr [%x:%08x (%08x)]\n"
297 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
298 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
299 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
300 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
a2fbb9ea
ET
301
302 *wb_comp = 0;
303
34f80b04 304 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
305
306 udelay(5);
ad8d3948
EG
307
308 while (*wb_comp != DMAE_COMP_VAL) {
309
ad8d3948 310 if (!cnt) {
a2fbb9ea
ET
311 BNX2X_ERR("dmae timeout!\n");
312 break;
313 }
ad8d3948 314 cnt--;
12469401
YG
315 /* adjust delay for emulation/FPGA */
316 if (CHIP_REV_IS_SLOW(bp))
317 msleep(100);
318 else
319 udelay(5);
a2fbb9ea 320 }
ad8d3948 321 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
322 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
323 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948
EG
324
325 mutex_unlock(&bp->dmae_mutex);
326}
327
328/* used only for slowpath so not inlined */
329static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
330{
331 u32 wb_write[2];
332
333 wb_write[0] = val_hi;
334 wb_write[1] = val_lo;
335 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 336}
a2fbb9ea 337
ad8d3948
EG
338#ifdef USE_WB_RD
339static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
340{
341 u32 wb_data[2];
342
343 REG_RD_DMAE(bp, reg, wb_data, 2);
344
345 return HILO_U64(wb_data[0], wb_data[1]);
346}
347#endif
348
a2fbb9ea
ET
349static int bnx2x_mc_assert(struct bnx2x *bp)
350{
a2fbb9ea 351 char last_idx;
34f80b04
EG
352 int i, rc = 0;
353 u32 row0, row1, row2, row3;
354
355 /* XSTORM */
356 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
357 XSTORM_ASSERT_LIST_INDEX_OFFSET);
358 if (last_idx)
359 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
360
361 /* print the asserts */
362 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
363
364 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 XSTORM_ASSERT_LIST_OFFSET(i));
366 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
368 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
370 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
371 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
372
373 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
374 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
375 " 0x%08x 0x%08x 0x%08x\n",
376 i, row3, row2, row1, row0);
377 rc++;
378 } else {
379 break;
380 }
381 }
382
383 /* TSTORM */
384 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
385 TSTORM_ASSERT_LIST_INDEX_OFFSET);
386 if (last_idx)
387 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
388
389 /* print the asserts */
390 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
391
392 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 TSTORM_ASSERT_LIST_OFFSET(i));
394 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
396 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
398 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
399 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
400
401 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
402 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
403 " 0x%08x 0x%08x 0x%08x\n",
404 i, row3, row2, row1, row0);
405 rc++;
406 } else {
407 break;
408 }
409 }
410
411 /* CSTORM */
412 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
413 CSTORM_ASSERT_LIST_INDEX_OFFSET);
414 if (last_idx)
415 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
416
417 /* print the asserts */
418 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
419
420 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 CSTORM_ASSERT_LIST_OFFSET(i));
422 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
424 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
426 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
427 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
428
429 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
430 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
431 " 0x%08x 0x%08x 0x%08x\n",
432 i, row3, row2, row1, row0);
433 rc++;
434 } else {
435 break;
436 }
437 }
438
439 /* USTORM */
440 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
441 USTORM_ASSERT_LIST_INDEX_OFFSET);
442 if (last_idx)
443 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
444
445 /* print the asserts */
446 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
447
448 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 USTORM_ASSERT_LIST_OFFSET(i));
450 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i) + 4);
452 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 8);
454 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
455 USTORM_ASSERT_LIST_OFFSET(i) + 12);
456
457 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
458 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
459 " 0x%08x 0x%08x 0x%08x\n",
460 i, row3, row2, row1, row0);
461 rc++;
462 } else {
463 break;
a2fbb9ea
ET
464 }
465 }
34f80b04 466
a2fbb9ea
ET
467 return rc;
468}
c14423fe 469
a2fbb9ea
ET
470static void bnx2x_fw_dump(struct bnx2x *bp)
471{
472 u32 mark, offset;
473 u32 data[9];
474 int word;
475
476 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
49d66772
ET
477 mark = ((mark + 0x3) & ~0x3);
478 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
a2fbb9ea
ET
479
480 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
481 for (word = 0; word < 8; word++)
482 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
483 offset + 4*word));
484 data[8] = 0x0;
49d66772 485 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
486 }
487 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
488 for (word = 0; word < 8; word++)
489 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
490 offset + 4*word));
491 data[8] = 0x0;
49d66772 492 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
493 }
494 printk("\n" KERN_ERR PFX "end of fw dump\n");
495}
496
497static void bnx2x_panic_dump(struct bnx2x *bp)
498{
499 int i;
500 u16 j, start, end;
501
66e855f3
YG
502 bp->stats_state = STATS_STATE_DISABLED;
503 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
504
a2fbb9ea
ET
505 BNX2X_ERR("begin crash dump -----------------\n");
506
507 for_each_queue(bp, i) {
508 struct bnx2x_fastpath *fp = &bp->fp[i];
509 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
510
511 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
34f80b04 512 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
a2fbb9ea 513 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
34f80b04 514 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
66e855f3
YG
515 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
516 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
517 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
518 fp->rx_bd_prod, fp->rx_bd_cons,
519 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
520 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
521 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
522 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
523 " *sb_u_idx(%x) bd data(%x,%x)\n",
524 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
525 fp->status_blk->c_status_block.status_block_index,
526 fp->fp_u_idx,
527 fp->status_blk->u_status_block.status_block_index,
528 hw_prods->packets_prod, hw_prods->bds_prod);
a2fbb9ea
ET
529
530 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
531 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
532 for (j = start; j < end; j++) {
533 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
534
535 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
536 sw_bd->skb, sw_bd->first_bd);
537 }
538
539 start = TX_BD(fp->tx_bd_cons - 10);
540 end = TX_BD(fp->tx_bd_cons + 254);
541 for (j = start; j < end; j++) {
542 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
543
544 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
545 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
546 }
547
548 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
549 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
550 for (j = start; j < end; j++) {
551 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
552 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
553
554 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
34f80b04 555 j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
556 }
557
7a9b2557
VZ
558 start = 0;
559 end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
560 for (j = start; j < end; j++) {
561 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
562 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
563
564 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
565 j, rx_sge[1], rx_sge[0], sw_page->page);
566 }
567
a2fbb9ea
ET
568 start = RCQ_BD(fp->rx_comp_cons - 10);
569 end = RCQ_BD(fp->rx_comp_cons + 503);
570 for (j = start; j < end; j++) {
571 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
572
573 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
574 j, cqe[0], cqe[1], cqe[2], cqe[3]);
575 }
576 }
577
49d66772
ET
578 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
579 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
a2fbb9ea 580 " spq_prod_idx(%u)\n",
49d66772 581 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
a2fbb9ea
ET
582 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
583
34f80b04 584 bnx2x_fw_dump(bp);
a2fbb9ea
ET
585 bnx2x_mc_assert(bp);
586 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
587}
588
615f8fd9 589static void bnx2x_int_enable(struct bnx2x *bp)
a2fbb9ea 590{
34f80b04 591 int port = BP_PORT(bp);
a2fbb9ea
ET
592 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
593 u32 val = REG_RD(bp, addr);
594 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
595
596 if (msix) {
597 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
598 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
599 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
600 } else {
601 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 602 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
603 HC_CONFIG_0_REG_INT_LINE_EN_0 |
604 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 605
615f8fd9
ET
606 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
607 val, port, addr, msix);
608
609 REG_WR(bp, addr, val);
610
a2fbb9ea
ET
611 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
612 }
613
615f8fd9 614 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
a2fbb9ea
ET
615 val, port, addr, msix);
616
617 REG_WR(bp, addr, val);
34f80b04
EG
618
619 if (CHIP_IS_E1H(bp)) {
620 /* init leading/trailing edge */
621 if (IS_E1HMF(bp)) {
622 val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
623 if (bp->port.pmf)
624 /* enable nig attention */
625 val |= 0x0100;
626 } else
627 val = 0xffff;
628
629 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
630 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
631 }
a2fbb9ea
ET
632}
633
615f8fd9 634static void bnx2x_int_disable(struct bnx2x *bp)
a2fbb9ea 635{
34f80b04 636 int port = BP_PORT(bp);
a2fbb9ea
ET
637 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
638 u32 val = REG_RD(bp, addr);
639
640 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
641 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
642 HC_CONFIG_0_REG_INT_LINE_EN_0 |
643 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
644
645 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
646 val, port, addr);
647
648 REG_WR(bp, addr, val);
649 if (REG_RD(bp, addr) != val)
650 BNX2X_ERR("BUG! proper val not read from IGU!\n");
651}
652
615f8fd9 653static void bnx2x_int_disable_sync(struct bnx2x *bp)
a2fbb9ea 654{
a2fbb9ea
ET
655 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
656 int i;
657
34f80b04 658 /* disable interrupt handling */
a2fbb9ea 659 atomic_inc(&bp->intr_sem);
c14423fe 660 /* prevent the HW from sending interrupts */
615f8fd9 661 bnx2x_int_disable(bp);
a2fbb9ea
ET
662
663 /* make sure all ISRs are done */
664 if (msix) {
665 for_each_queue(bp, i)
666 synchronize_irq(bp->msix_table[i].vector);
667
668 /* one more for the Slow Path IRQ */
669 synchronize_irq(bp->msix_table[i].vector);
670 } else
671 synchronize_irq(bp->pdev->irq);
672
673 /* make sure sp_task is not running */
674 cancel_work_sync(&bp->sp_task);
a2fbb9ea
ET
675}
676
34f80b04 677/* fast path */
a2fbb9ea
ET
678
679/*
34f80b04 680 * General service functions
a2fbb9ea
ET
681 */
682
34f80b04 683static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
a2fbb9ea
ET
684 u8 storm, u16 index, u8 op, u8 update)
685{
5c862848
EG
686 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
687 COMMAND_REG_INT_ACK);
a2fbb9ea
ET
688 struct igu_ack_register igu_ack;
689
690 igu_ack.status_block_index = index;
691 igu_ack.sb_id_and_flags =
34f80b04 692 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
a2fbb9ea
ET
693 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
694 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
695 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
696
5c862848
EG
697 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
698 (*(u32 *)&igu_ack), hc_addr);
699 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
a2fbb9ea
ET
700}
701
702static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
703{
704 struct host_status_block *fpsb = fp->status_blk;
705 u16 rc = 0;
706
707 barrier(); /* status block is written to by the chip */
708 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
709 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
710 rc |= 1;
711 }
712 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
713 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
714 rc |= 2;
715 }
716 return rc;
717}
718
a2fbb9ea
ET
719static u16 bnx2x_ack_int(struct bnx2x *bp)
720{
5c862848
EG
721 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
722 COMMAND_REG_SIMD_MASK);
723 u32 result = REG_RD(bp, hc_addr);
a2fbb9ea 724
5c862848
EG
725 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
726 result, hc_addr);
a2fbb9ea 727
a2fbb9ea
ET
728 return result;
729}
730
731
732/*
733 * fast path service functions
734 */
735
736/* free skb in the packet ring at pos idx
737 * return idx of last bd freed
738 */
739static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
740 u16 idx)
741{
742 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
743 struct eth_tx_bd *tx_bd;
744 struct sk_buff *skb = tx_buf->skb;
34f80b04 745 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
746 int nbd;
747
748 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
749 idx, tx_buf, skb);
750
751 /* unmap first bd */
752 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
753 tx_bd = &fp->tx_desc_ring[bd_idx];
754 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
755 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
756
757 nbd = le16_to_cpu(tx_bd->nbd) - 1;
34f80b04 758 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea
ET
759#ifdef BNX2X_STOP_ON_ERROR
760 if (nbd > (MAX_SKB_FRAGS + 2)) {
34f80b04 761 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
762 bnx2x_panic();
763 }
764#endif
765
766 /* Skip a parse bd and the TSO split header bd
767 since they have no mapping */
768 if (nbd)
769 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
770
771 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
772 ETH_TX_BD_FLAGS_TCP_CSUM |
773 ETH_TX_BD_FLAGS_SW_LSO)) {
774 if (--nbd)
775 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
776 tx_bd = &fp->tx_desc_ring[bd_idx];
777 /* is this a TSO split header bd? */
778 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
779 if (--nbd)
780 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
781 }
782 }
783
784 /* now free frags */
785 while (nbd > 0) {
786
787 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
788 tx_bd = &fp->tx_desc_ring[bd_idx];
789 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
790 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
791 if (--nbd)
792 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
793 }
794
795 /* release skb */
53e5e96e 796 WARN_ON(!skb);
a2fbb9ea
ET
797 dev_kfree_skb(skb);
798 tx_buf->first_bd = 0;
799 tx_buf->skb = NULL;
800
34f80b04 801 return new_cons;
a2fbb9ea
ET
802}
803
34f80b04 804static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
a2fbb9ea 805{
34f80b04
EG
806 s16 used;
807 u16 prod;
808 u16 cons;
a2fbb9ea 809
34f80b04 810 barrier(); /* Tell compiler that prod and cons can change */
a2fbb9ea
ET
811 prod = fp->tx_bd_prod;
812 cons = fp->tx_bd_cons;
813
34f80b04
EG
814 /* NUM_TX_RINGS = number of "next-page" entries
815 It will be used as a threshold */
816 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
a2fbb9ea 817
34f80b04 818#ifdef BNX2X_STOP_ON_ERROR
53e5e96e
IJ
819 WARN_ON(used < 0);
820 WARN_ON(used > fp->bp->tx_ring_size);
821 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
34f80b04 822#endif
a2fbb9ea 823
34f80b04 824 return (s16)(fp->bp->tx_ring_size) - used;
a2fbb9ea
ET
825}
826
827static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
828{
829 struct bnx2x *bp = fp->bp;
830 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
831 int done = 0;
832
833#ifdef BNX2X_STOP_ON_ERROR
834 if (unlikely(bp->panic))
835 return;
836#endif
837
838 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
839 sw_cons = fp->tx_pkt_cons;
840
841 while (sw_cons != hw_cons) {
842 u16 pkt_cons;
843
844 pkt_cons = TX_BD(sw_cons);
845
846 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
847
34f80b04 848 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
a2fbb9ea
ET
849 hw_cons, sw_cons, pkt_cons);
850
34f80b04 851/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
a2fbb9ea
ET
852 rmb();
853 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
854 }
855*/
856 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
857 sw_cons++;
858 done++;
859
860 if (done == work)
861 break;
862 }
863
864 fp->tx_pkt_cons = sw_cons;
865 fp->tx_bd_cons = bd_cons;
866
867 /* Need to make the tx_cons update visible to start_xmit()
868 * before checking for netif_queue_stopped(). Without the
869 * memory barrier, there is a small possibility that start_xmit()
870 * will miss it and cause the queue to be stopped forever.
871 */
872 smp_mb();
873
874 /* TBD need a thresh? */
875 if (unlikely(netif_queue_stopped(bp->dev))) {
876
877 netif_tx_lock(bp->dev);
878
879 if (netif_queue_stopped(bp->dev) &&
da5a662a 880 (bp->state == BNX2X_STATE_OPEN) &&
a2fbb9ea
ET
881 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
882 netif_wake_queue(bp->dev);
883
884 netif_tx_unlock(bp->dev);
a2fbb9ea
ET
885 }
886}
887
888static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
889 union eth_rx_cqe *rr_cqe)
890{
891 struct bnx2x *bp = fp->bp;
892 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
893 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
894
34f80b04 895 DP(BNX2X_MSG_SP,
a2fbb9ea 896 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
34f80b04
EG
897 FP_IDX(fp), cid, command, bp->state,
898 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
899
900 bp->spq_left++;
901
34f80b04 902 if (FP_IDX(fp)) {
a2fbb9ea
ET
903 switch (command | fp->state) {
904 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
905 BNX2X_FP_STATE_OPENING):
906 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
907 cid);
908 fp->state = BNX2X_FP_STATE_OPEN;
909 break;
910
911 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
912 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
913 cid);
914 fp->state = BNX2X_FP_STATE_HALTED;
915 break;
916
917 default:
34f80b04
EG
918 BNX2X_ERR("unexpected MC reply (%d) "
919 "fp->state is %x\n", command, fp->state);
920 break;
a2fbb9ea 921 }
34f80b04 922 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
923 return;
924 }
c14423fe 925
a2fbb9ea
ET
926 switch (command | bp->state) {
927 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
928 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
929 bp->state = BNX2X_STATE_OPEN;
930 break;
931
932 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
933 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
934 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
935 fp->state = BNX2X_FP_STATE_HALTED;
936 break;
937
a2fbb9ea 938 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 939 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 940 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
941 break;
942
943 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 944 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 945 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
bb2a0f7a 946 bp->set_mac_pending = 0;
a2fbb9ea
ET
947 break;
948
49d66772 949 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 950 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
49d66772
ET
951 break;
952
a2fbb9ea 953 default:
34f80b04 954 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 955 command, bp->state);
34f80b04 956 break;
a2fbb9ea 957 }
34f80b04 958 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
959}
960
7a9b2557
VZ
961static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
962 struct bnx2x_fastpath *fp, u16 index)
963{
964 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
965 struct page *page = sw_buf->page;
966 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
967
968 /* Skip "next page" elements */
969 if (!page)
970 return;
971
972 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
973 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
974 __free_pages(page, PAGES_PER_SGE_SHIFT);
975
976 sw_buf->page = NULL;
977 sge->addr_hi = 0;
978 sge->addr_lo = 0;
979}
980
981static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
982 struct bnx2x_fastpath *fp, int last)
983{
984 int i;
985
986 for (i = 0; i < last; i++)
987 bnx2x_free_rx_sge(bp, fp, i);
988}
989
990static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
991 struct bnx2x_fastpath *fp, u16 index)
992{
993 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
994 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
995 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
996 dma_addr_t mapping;
997
998 if (unlikely(page == NULL))
999 return -ENOMEM;
1000
1001 mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
1002 PCI_DMA_FROMDEVICE);
8d8bb39b 1003 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
7a9b2557
VZ
1004 __free_pages(page, PAGES_PER_SGE_SHIFT);
1005 return -ENOMEM;
1006 }
1007
1008 sw_buf->page = page;
1009 pci_unmap_addr_set(sw_buf, mapping, mapping);
1010
1011 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1012 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1013
1014 return 0;
1015}
1016
a2fbb9ea
ET
1017static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1018 struct bnx2x_fastpath *fp, u16 index)
1019{
1020 struct sk_buff *skb;
1021 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1022 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1023 dma_addr_t mapping;
1024
1025 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1026 if (unlikely(skb == NULL))
1027 return -ENOMEM;
1028
1029 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1030 PCI_DMA_FROMDEVICE);
8d8bb39b 1031 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
a2fbb9ea
ET
1032 dev_kfree_skb(skb);
1033 return -ENOMEM;
1034 }
1035
1036 rx_buf->skb = skb;
1037 pci_unmap_addr_set(rx_buf, mapping, mapping);
1038
1039 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1040 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1041
1042 return 0;
1043}
1044
1045/* note that we are not allocating a new skb,
1046 * we are just moving one from cons to prod
1047 * we are not creating a new mapping,
1048 * so there is no need to check for dma_mapping_error().
1049 */
1050static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1051 struct sk_buff *skb, u16 cons, u16 prod)
1052{
1053 struct bnx2x *bp = fp->bp;
1054 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1055 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1056 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1057 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1058
1059 pci_dma_sync_single_for_device(bp->pdev,
1060 pci_unmap_addr(cons_rx_buf, mapping),
1061 bp->rx_offset + RX_COPY_THRESH,
1062 PCI_DMA_FROMDEVICE);
1063
1064 prod_rx_buf->skb = cons_rx_buf->skb;
1065 pci_unmap_addr_set(prod_rx_buf, mapping,
1066 pci_unmap_addr(cons_rx_buf, mapping));
1067 *prod_bd = *cons_bd;
1068}
1069
7a9b2557
VZ
1070static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1071 u16 idx)
1072{
1073 u16 last_max = fp->last_max_sge;
1074
1075 if (SUB_S16(idx, last_max) > 0)
1076 fp->last_max_sge = idx;
1077}
1078
1079static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1080{
1081 int i, j;
1082
1083 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1084 int idx = RX_SGE_CNT * i - 1;
1085
1086 for (j = 0; j < 2; j++) {
1087 SGE_MASK_CLEAR_BIT(fp, idx);
1088 idx--;
1089 }
1090 }
1091}
1092
1093static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1094 struct eth_fast_path_rx_cqe *fp_cqe)
1095{
1096 struct bnx2x *bp = fp->bp;
1097 u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1098 le16_to_cpu(fp_cqe->len_on_bd)) >>
1099 BCM_PAGE_SHIFT;
1100 u16 last_max, last_elem, first_elem;
1101 u16 delta = 0;
1102 u16 i;
1103
1104 if (!sge_len)
1105 return;
1106
1107 /* First mark all used pages */
1108 for (i = 0; i < sge_len; i++)
1109 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1110
1111 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1112 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1113
1114 /* Here we assume that the last SGE index is the biggest */
1115 prefetch((void *)(fp->sge_mask));
1116 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1117
1118 last_max = RX_SGE(fp->last_max_sge);
1119 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1120 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1121
1122 /* If ring is not full */
1123 if (last_elem + 1 != first_elem)
1124 last_elem++;
1125
1126 /* Now update the prod */
1127 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1128 if (likely(fp->sge_mask[i]))
1129 break;
1130
1131 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1132 delta += RX_SGE_MASK_ELEM_SZ;
1133 }
1134
1135 if (delta > 0) {
1136 fp->rx_sge_prod += delta;
1137 /* clear page-end entries */
1138 bnx2x_clear_sge_mask_next_elems(fp);
1139 }
1140
1141 DP(NETIF_MSG_RX_STATUS,
1142 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1143 fp->last_max_sge, fp->rx_sge_prod);
1144}
1145
1146static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1147{
1148 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1149 memset(fp->sge_mask, 0xff,
1150 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1151
1152 /* Clear the two last indeces in the page to 1:
1153 these are the indeces that correspond to the "next" element,
1154 hence will never be indicated and should be removed from
1155 the calculations. */
1156 bnx2x_clear_sge_mask_next_elems(fp);
1157}
1158
1159static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1160 struct sk_buff *skb, u16 cons, u16 prod)
1161{
1162 struct bnx2x *bp = fp->bp;
1163 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1164 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1165 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1166 dma_addr_t mapping;
1167
1168 /* move empty skb from pool to prod and map it */
1169 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1170 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1171 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1172 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1173
1174 /* move partial skb from cons to pool (don't unmap yet) */
1175 fp->tpa_pool[queue] = *cons_rx_buf;
1176
1177 /* mark bin state as start - print error if current state != stop */
1178 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1179 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1180
1181 fp->tpa_state[queue] = BNX2X_TPA_START;
1182
1183 /* point prod_bd to new skb */
1184 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1185 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1186
1187#ifdef BNX2X_STOP_ON_ERROR
1188 fp->tpa_queue_used |= (1 << queue);
1189#ifdef __powerpc64__
1190 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1191#else
1192 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1193#endif
1194 fp->tpa_queue_used);
1195#endif
1196}
1197
1198static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1199 struct sk_buff *skb,
1200 struct eth_fast_path_rx_cqe *fp_cqe,
1201 u16 cqe_idx)
1202{
1203 struct sw_rx_page *rx_pg, old_rx_pg;
1204 struct page *sge;
1205 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1206 u32 i, frag_len, frag_size, pages;
1207 int err;
1208 int j;
1209
1210 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1211 pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
1212
1213 /* This is needed in order to enable forwarding support */
1214 if (frag_size)
1215 skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
1216 max(frag_size, (u32)len_on_bd));
1217
1218#ifdef BNX2X_STOP_ON_ERROR
1219 if (pages > 8*PAGES_PER_SGE) {
1220 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1221 pages, cqe_idx);
1222 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1223 fp_cqe->pkt_len, len_on_bd);
1224 bnx2x_panic();
1225 return -EINVAL;
1226 }
1227#endif
1228
1229 /* Run through the SGL and compose the fragmented skb */
1230 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1231 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1232
1233 /* FW gives the indices of the SGE as if the ring is an array
1234 (meaning that "next" element will consume 2 indices) */
1235 frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
1236 rx_pg = &fp->rx_page_ring[sge_idx];
1237 sge = rx_pg->page;
1238 old_rx_pg = *rx_pg;
1239
1240 /* If we fail to allocate a substitute page, we simply stop
1241 where we are and drop the whole packet */
1242 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1243 if (unlikely(err)) {
66e855f3 1244 bp->eth_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1245 return err;
1246 }
1247
1248 /* Unmap the page as we r going to pass it to the stack */
1249 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1250 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1251
1252 /* Add one frag and update the appropriate fields in the skb */
1253 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1254
1255 skb->data_len += frag_len;
1256 skb->truesize += frag_len;
1257 skb->len += frag_len;
1258
1259 frag_size -= frag_len;
1260 }
1261
1262 return 0;
1263}
1264
1265static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1266 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1267 u16 cqe_idx)
1268{
1269 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1270 struct sk_buff *skb = rx_buf->skb;
1271 /* alloc new skb */
1272 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1273
1274 /* Unmap skb in the pool anyway, as we are going to change
1275 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1276 fails. */
1277 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1278 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1279
7a9b2557 1280 if (likely(new_skb)) {
66e855f3
YG
1281 /* fix ip xsum and give it to the stack */
1282 /* (no need to map the new skb) */
7a9b2557
VZ
1283
1284 prefetch(skb);
1285 prefetch(((char *)(skb)) + 128);
1286
7a9b2557
VZ
1287#ifdef BNX2X_STOP_ON_ERROR
1288 if (pad + len > bp->rx_buf_size) {
1289 BNX2X_ERR("skb_put is about to fail... "
1290 "pad %d len %d rx_buf_size %d\n",
1291 pad, len, bp->rx_buf_size);
1292 bnx2x_panic();
1293 return;
1294 }
1295#endif
1296
1297 skb_reserve(skb, pad);
1298 skb_put(skb, len);
1299
1300 skb->protocol = eth_type_trans(skb, bp->dev);
1301 skb->ip_summed = CHECKSUM_UNNECESSARY;
1302
1303 {
1304 struct iphdr *iph;
1305
1306 iph = (struct iphdr *)skb->data;
1307 iph->check = 0;
1308 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1309 }
1310
1311 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1312 &cqe->fast_path_cqe, cqe_idx)) {
1313#ifdef BCM_VLAN
1314 if ((bp->vlgrp != NULL) &&
1315 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1316 PARSING_FLAGS_VLAN))
1317 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1318 le16_to_cpu(cqe->fast_path_cqe.
1319 vlan_tag));
1320 else
1321#endif
1322 netif_receive_skb(skb);
1323 } else {
1324 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1325 " - dropping packet!\n");
1326 dev_kfree_skb(skb);
1327 }
1328
1329 bp->dev->last_rx = jiffies;
1330
1331 /* put new skb in bin */
1332 fp->tpa_pool[queue].skb = new_skb;
1333
1334 } else {
66e855f3 1335 /* else drop the packet and keep the buffer in the bin */
7a9b2557
VZ
1336 DP(NETIF_MSG_RX_STATUS,
1337 "Failed to allocate new skb - dropping packet!\n");
66e855f3 1338 bp->eth_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1339 }
1340
1341 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1342}
1343
1344static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1345 struct bnx2x_fastpath *fp,
1346 u16 bd_prod, u16 rx_comp_prod,
1347 u16 rx_sge_prod)
1348{
1349 struct tstorm_eth_rx_producers rx_prods = {0};
1350 int i;
1351
1352 /* Update producers */
1353 rx_prods.bd_prod = bd_prod;
1354 rx_prods.cqe_prod = rx_comp_prod;
1355 rx_prods.sge_prod = rx_sge_prod;
1356
1357 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1358 REG_WR(bp, BAR_TSTRORM_INTMEM +
1359 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1360 ((u32 *)&rx_prods)[i]);
1361
1362 DP(NETIF_MSG_RX_STATUS,
1363 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1364 bd_prod, rx_comp_prod, rx_sge_prod);
1365}
1366
a2fbb9ea
ET
1367static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1368{
1369 struct bnx2x *bp = fp->bp;
34f80b04 1370 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1371 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1372 int rx_pkt = 0;
7a9b2557 1373 u16 queue;
a2fbb9ea
ET
1374
1375#ifdef BNX2X_STOP_ON_ERROR
1376 if (unlikely(bp->panic))
1377 return 0;
1378#endif
1379
34f80b04
EG
1380 /* CQ "next element" is of the size of the regular element,
1381 that's why it's ok here */
a2fbb9ea
ET
1382 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1383 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1384 hw_comp_cons++;
1385
1386 bd_cons = fp->rx_bd_cons;
1387 bd_prod = fp->rx_bd_prod;
34f80b04 1388 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1389 sw_comp_cons = fp->rx_comp_cons;
1390 sw_comp_prod = fp->rx_comp_prod;
1391
1392 /* Memory barrier necessary as speculative reads of the rx
1393 * buffer can be ahead of the index in the status block
1394 */
1395 rmb();
1396
1397 DP(NETIF_MSG_RX_STATUS,
1398 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
34f80b04 1399 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1400
1401 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1402 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1403 struct sk_buff *skb;
1404 union eth_rx_cqe *cqe;
34f80b04
EG
1405 u8 cqe_fp_flags;
1406 u16 len, pad;
a2fbb9ea
ET
1407
1408 comp_ring_cons = RCQ_BD(sw_comp_cons);
1409 bd_prod = RX_BD(bd_prod);
1410 bd_cons = RX_BD(bd_cons);
1411
1412 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1413 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1414
a2fbb9ea 1415 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1416 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1417 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
a2fbb9ea 1418 cqe->fast_path_cqe.rss_hash_result,
34f80b04
EG
1419 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1420 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1421
1422 /* is this a slowpath msg? */
34f80b04 1423 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1424 bnx2x_sp_event(fp, cqe);
1425 goto next_cqe;
1426
1427 /* this is an rx packet */
1428 } else {
1429 rx_buf = &fp->rx_buf_ring[bd_cons];
1430 skb = rx_buf->skb;
a2fbb9ea
ET
1431 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1432 pad = cqe->fast_path_cqe.placement_offset;
1433
7a9b2557
VZ
1434 /* If CQE is marked both TPA_START and TPA_END
1435 it is a non-TPA CQE */
1436 if ((!fp->disable_tpa) &&
1437 (TPA_TYPE(cqe_fp_flags) !=
1438 (TPA_TYPE_START | TPA_TYPE_END))) {
1439 queue = cqe->fast_path_cqe.queue_index;
1440
1441 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1442 DP(NETIF_MSG_RX_STATUS,
1443 "calling tpa_start on queue %d\n",
1444 queue);
1445
1446 bnx2x_tpa_start(fp, queue, skb,
1447 bd_cons, bd_prod);
1448 goto next_rx;
1449 }
1450
1451 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1452 DP(NETIF_MSG_RX_STATUS,
1453 "calling tpa_stop on queue %d\n",
1454 queue);
1455
1456 if (!BNX2X_RX_SUM_FIX(cqe))
1457 BNX2X_ERR("STOP on none TCP "
1458 "data\n");
1459
1460 /* This is a size of the linear data
1461 on this skb */
1462 len = le16_to_cpu(cqe->fast_path_cqe.
1463 len_on_bd);
1464 bnx2x_tpa_stop(bp, fp, queue, pad,
1465 len, cqe, comp_ring_cons);
1466#ifdef BNX2X_STOP_ON_ERROR
1467 if (bp->panic)
1468 return -EINVAL;
1469#endif
1470
1471 bnx2x_update_sge_prod(fp,
1472 &cqe->fast_path_cqe);
1473 goto next_cqe;
1474 }
1475 }
1476
a2fbb9ea
ET
1477 pci_dma_sync_single_for_device(bp->pdev,
1478 pci_unmap_addr(rx_buf, mapping),
1479 pad + RX_COPY_THRESH,
1480 PCI_DMA_FROMDEVICE);
1481 prefetch(skb);
1482 prefetch(((char *)(skb)) + 128);
1483
1484 /* is this an error packet? */
34f80b04 1485 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1486 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1487 "ERROR flags %x rx packet %u\n",
1488 cqe_fp_flags, sw_comp_cons);
66e855f3 1489 bp->eth_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1490 goto reuse_rx;
1491 }
1492
1493 /* Since we don't have a jumbo ring
1494 * copy small packets if mtu > 1500
1495 */
1496 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1497 (len <= RX_COPY_THRESH)) {
1498 struct sk_buff *new_skb;
1499
1500 new_skb = netdev_alloc_skb(bp->dev,
1501 len + pad);
1502 if (new_skb == NULL) {
1503 DP(NETIF_MSG_RX_ERR,
34f80b04 1504 "ERROR packet dropped "
a2fbb9ea 1505 "because of alloc failure\n");
66e855f3 1506 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1507 goto reuse_rx;
1508 }
1509
1510 /* aligned copy */
1511 skb_copy_from_linear_data_offset(skb, pad,
1512 new_skb->data + pad, len);
1513 skb_reserve(new_skb, pad);
1514 skb_put(new_skb, len);
1515
1516 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1517
1518 skb = new_skb;
1519
1520 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1521 pci_unmap_single(bp->pdev,
1522 pci_unmap_addr(rx_buf, mapping),
1523 bp->rx_buf_use_size,
1524 PCI_DMA_FROMDEVICE);
1525 skb_reserve(skb, pad);
1526 skb_put(skb, len);
1527
1528 } else {
1529 DP(NETIF_MSG_RX_ERR,
34f80b04 1530 "ERROR packet dropped because "
a2fbb9ea 1531 "of alloc failure\n");
66e855f3 1532 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1533reuse_rx:
1534 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1535 goto next_rx;
1536 }
1537
1538 skb->protocol = eth_type_trans(skb, bp->dev);
1539
1540 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1541 if (bp->rx_csum) {
1adcd8be
EG
1542 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1543 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3
YG
1544 else
1545 bp->eth_stats.hw_csum_err++;
1546 }
a2fbb9ea
ET
1547 }
1548
1549#ifdef BCM_VLAN
34f80b04
EG
1550 if ((bp->vlgrp != NULL) &&
1551 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1552 PARSING_FLAGS_VLAN))
a2fbb9ea
ET
1553 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1554 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1555 else
1556#endif
34f80b04 1557 netif_receive_skb(skb);
a2fbb9ea
ET
1558
1559 bp->dev->last_rx = jiffies;
1560
1561next_rx:
1562 rx_buf->skb = NULL;
1563
1564 bd_cons = NEXT_RX_IDX(bd_cons);
1565 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1566 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1567 rx_pkt++;
a2fbb9ea
ET
1568next_cqe:
1569 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1570 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1571
34f80b04 1572 if (rx_pkt == budget)
a2fbb9ea
ET
1573 break;
1574 } /* while */
1575
1576 fp->rx_bd_cons = bd_cons;
34f80b04 1577 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1578 fp->rx_comp_cons = sw_comp_cons;
1579 fp->rx_comp_prod = sw_comp_prod;
1580
7a9b2557
VZ
1581 /* Update producers */
1582 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1583 fp->rx_sge_prod);
a2fbb9ea
ET
1584 mmiowb(); /* keep prod updates ordered */
1585
1586 fp->rx_pkt += rx_pkt;
1587 fp->rx_calls++;
1588
1589 return rx_pkt;
1590}
1591
1592static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1593{
1594 struct bnx2x_fastpath *fp = fp_cookie;
1595 struct bnx2x *bp = fp->bp;
1596 struct net_device *dev = bp->dev;
34f80b04 1597 int index = FP_IDX(fp);
a2fbb9ea 1598
da5a662a
VZ
1599 /* Return here if interrupt is disabled */
1600 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1601 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1602 return IRQ_HANDLED;
1603 }
1604
34f80b04
EG
1605 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1606 index, FP_SB_ID(fp));
1607 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1608
1609#ifdef BNX2X_STOP_ON_ERROR
1610 if (unlikely(bp->panic))
1611 return IRQ_HANDLED;
1612#endif
1613
1614 prefetch(fp->rx_cons_sb);
1615 prefetch(fp->tx_cons_sb);
1616 prefetch(&fp->status_blk->c_status_block.status_block_index);
1617 prefetch(&fp->status_blk->u_status_block.status_block_index);
1618
1619 netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
34f80b04 1620
a2fbb9ea
ET
1621 return IRQ_HANDLED;
1622}
1623
1624static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1625{
1626 struct net_device *dev = dev_instance;
1627 struct bnx2x *bp = netdev_priv(dev);
1628 u16 status = bnx2x_ack_int(bp);
34f80b04 1629 u16 mask;
a2fbb9ea 1630
34f80b04 1631 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1632 if (unlikely(status == 0)) {
1633 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1634 return IRQ_NONE;
1635 }
34f80b04 1636 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
a2fbb9ea
ET
1637
1638#ifdef BNX2X_STOP_ON_ERROR
1639 if (unlikely(bp->panic))
1640 return IRQ_HANDLED;
1641#endif
1642
34f80b04 1643 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1644 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1645 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1646 return IRQ_HANDLED;
1647 }
1648
34f80b04
EG
1649 mask = 0x2 << bp->fp[0].sb_id;
1650 if (status & mask) {
a2fbb9ea
ET
1651 struct bnx2x_fastpath *fp = &bp->fp[0];
1652
1653 prefetch(fp->rx_cons_sb);
1654 prefetch(fp->tx_cons_sb);
1655 prefetch(&fp->status_blk->c_status_block.status_block_index);
1656 prefetch(&fp->status_blk->u_status_block.status_block_index);
1657
1658 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1659
34f80b04 1660 status &= ~mask;
a2fbb9ea
ET
1661 }
1662
a2fbb9ea 1663
34f80b04 1664 if (unlikely(status & 0x1)) {
a2fbb9ea
ET
1665 schedule_work(&bp->sp_task);
1666
1667 status &= ~0x1;
1668 if (!status)
1669 return IRQ_HANDLED;
1670 }
1671
34f80b04
EG
1672 if (status)
1673 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1674 status);
a2fbb9ea 1675
c18487ee 1676 return IRQ_HANDLED;
a2fbb9ea
ET
1677}
1678
c18487ee 1679/* end of fast path */
a2fbb9ea 1680
bb2a0f7a 1681static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1682
c18487ee
YR
1683/* Link */
1684
1685/*
1686 * General service functions
1687 */
a2fbb9ea 1688
4a37fb66 1689static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1690{
1691 u32 lock_status;
1692 u32 resource_bit = (1 << resource);
4a37fb66
YG
1693 int func = BP_FUNC(bp);
1694 u32 hw_lock_control_reg;
c18487ee 1695 int cnt;
a2fbb9ea 1696
c18487ee
YR
1697 /* Validating that the resource is within range */
1698 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1699 DP(NETIF_MSG_HW,
1700 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1701 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1702 return -EINVAL;
1703 }
a2fbb9ea 1704
4a37fb66
YG
1705 if (func <= 5) {
1706 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1707 } else {
1708 hw_lock_control_reg =
1709 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1710 }
1711
c18487ee 1712 /* Validating that the resource is not already taken */
4a37fb66 1713 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1714 if (lock_status & resource_bit) {
1715 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1716 lock_status, resource_bit);
1717 return -EEXIST;
1718 }
a2fbb9ea 1719
c18487ee
YR
1720 /* Try for 1 second every 5ms */
1721 for (cnt = 0; cnt < 200; cnt++) {
1722 /* Try to acquire the lock */
4a37fb66
YG
1723 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1724 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1725 if (lock_status & resource_bit)
1726 return 0;
a2fbb9ea 1727
c18487ee 1728 msleep(5);
a2fbb9ea 1729 }
c18487ee
YR
1730 DP(NETIF_MSG_HW, "Timeout\n");
1731 return -EAGAIN;
1732}
a2fbb9ea 1733
4a37fb66 1734static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1735{
1736 u32 lock_status;
1737 u32 resource_bit = (1 << resource);
4a37fb66
YG
1738 int func = BP_FUNC(bp);
1739 u32 hw_lock_control_reg;
a2fbb9ea 1740
c18487ee
YR
1741 /* Validating that the resource is within range */
1742 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1743 DP(NETIF_MSG_HW,
1744 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1745 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1746 return -EINVAL;
1747 }
1748
4a37fb66
YG
1749 if (func <= 5) {
1750 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1751 } else {
1752 hw_lock_control_reg =
1753 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1754 }
1755
c18487ee 1756 /* Validating that the resource is currently taken */
4a37fb66 1757 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1758 if (!(lock_status & resource_bit)) {
1759 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1760 lock_status, resource_bit);
1761 return -EFAULT;
a2fbb9ea
ET
1762 }
1763
4a37fb66 1764 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1765 return 0;
1766}
1767
1768/* HW Lock for shared dual port PHYs */
4a37fb66 1769static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee
YR
1770{
1771 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1772
34f80b04 1773 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1774
c18487ee
YR
1775 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1776 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1777 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
c18487ee 1778}
a2fbb9ea 1779
4a37fb66 1780static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee
YR
1781{
1782 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1783
c18487ee
YR
1784 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1785 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1786 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1787
34f80b04 1788 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1789}
a2fbb9ea 1790
c18487ee
YR
1791int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1792{
1793 /* The GPIO should be swapped if swap register is set and active */
1794 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
34f80b04 1795 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
c18487ee
YR
1796 int gpio_shift = gpio_num +
1797 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1798 u32 gpio_mask = (1 << gpio_shift);
1799 u32 gpio_reg;
a2fbb9ea 1800
c18487ee
YR
1801 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1802 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1803 return -EINVAL;
1804 }
a2fbb9ea 1805
4a37fb66 1806 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1807 /* read GPIO and mask except the float bits */
1808 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1809
c18487ee
YR
1810 switch (mode) {
1811 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1812 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1813 gpio_num, gpio_shift);
1814 /* clear FLOAT and set CLR */
1815 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1816 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1817 break;
a2fbb9ea 1818
c18487ee
YR
1819 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1820 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1821 gpio_num, gpio_shift);
1822 /* clear FLOAT and set SET */
1823 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1824 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1825 break;
a2fbb9ea 1826
c18487ee
YR
1827 case MISC_REGISTERS_GPIO_INPUT_HI_Z :
1828 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1829 gpio_num, gpio_shift);
1830 /* set FLOAT */
1831 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1832 break;
a2fbb9ea 1833
c18487ee
YR
1834 default:
1835 break;
a2fbb9ea
ET
1836 }
1837
c18487ee 1838 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1839 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1840
c18487ee 1841 return 0;
a2fbb9ea
ET
1842}
1843
c18487ee 1844static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1845{
c18487ee
YR
1846 u32 spio_mask = (1 << spio_num);
1847 u32 spio_reg;
a2fbb9ea 1848
c18487ee
YR
1849 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1850 (spio_num > MISC_REGISTERS_SPIO_7)) {
1851 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1852 return -EINVAL;
a2fbb9ea
ET
1853 }
1854
4a37fb66 1855 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1856 /* read SPIO and mask except the float bits */
1857 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1858
c18487ee
YR
1859 switch (mode) {
1860 case MISC_REGISTERS_SPIO_OUTPUT_LOW :
1861 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1862 /* clear FLOAT and set CLR */
1863 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1864 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1865 break;
a2fbb9ea 1866
c18487ee
YR
1867 case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
1868 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1869 /* clear FLOAT and set SET */
1870 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1871 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1872 break;
a2fbb9ea 1873
c18487ee
YR
1874 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1875 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1876 /* set FLOAT */
1877 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1878 break;
a2fbb9ea 1879
c18487ee
YR
1880 default:
1881 break;
a2fbb9ea
ET
1882 }
1883
c18487ee 1884 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1885 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1886
a2fbb9ea
ET
1887 return 0;
1888}
1889
c18487ee 1890static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1891{
c18487ee
YR
1892 switch (bp->link_vars.ieee_fc) {
1893 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1894 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1895 ADVERTISED_Pause);
1896 break;
1897 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1898 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1899 ADVERTISED_Pause);
1900 break;
1901 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1902 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
1903 break;
1904 default:
34f80b04 1905 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1906 ADVERTISED_Pause);
1907 break;
1908 }
1909}
f1410647 1910
c18487ee
YR
1911static void bnx2x_link_report(struct bnx2x *bp)
1912{
1913 if (bp->link_vars.link_up) {
1914 if (bp->state == BNX2X_STATE_OPEN)
1915 netif_carrier_on(bp->dev);
1916 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1917
c18487ee 1918 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1919
c18487ee
YR
1920 if (bp->link_vars.duplex == DUPLEX_FULL)
1921 printk("full duplex");
1922 else
1923 printk("half duplex");
f1410647 1924
c18487ee
YR
1925 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1926 if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1927 printk(", receive ");
1928 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1929 printk("& transmit ");
1930 } else {
1931 printk(", transmit ");
1932 }
1933 printk("flow control ON");
1934 }
1935 printk("\n");
f1410647 1936
c18487ee
YR
1937 } else { /* link_down */
1938 netif_carrier_off(bp->dev);
1939 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1940 }
c18487ee
YR
1941}
1942
1943static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1944{
19680c48
EG
1945 if (!BP_NOMCP(bp)) {
1946 u8 rc;
a2fbb9ea 1947
19680c48
EG
1948 /* Initialize link parameters structure variables */
1949 bp->link_params.mtu = bp->dev->mtu;
a2fbb9ea 1950
4a37fb66 1951 bnx2x_acquire_phy_lock(bp);
19680c48 1952 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1953 bnx2x_release_phy_lock(bp);
a2fbb9ea 1954
19680c48
EG
1955 if (bp->link_vars.link_up)
1956 bnx2x_link_report(bp);
a2fbb9ea 1957
19680c48 1958 bnx2x_calc_fc_adv(bp);
34f80b04 1959
19680c48
EG
1960 return rc;
1961 }
1962 BNX2X_ERR("Bootcode is missing -not initializing link\n");
1963 return -EINVAL;
a2fbb9ea
ET
1964}
1965
c18487ee 1966static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1967{
19680c48 1968 if (!BP_NOMCP(bp)) {
4a37fb66 1969 bnx2x_acquire_phy_lock(bp);
19680c48 1970 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1971 bnx2x_release_phy_lock(bp);
a2fbb9ea 1972
19680c48
EG
1973 bnx2x_calc_fc_adv(bp);
1974 } else
1975 BNX2X_ERR("Bootcode is missing -not setting link\n");
c18487ee 1976}
a2fbb9ea 1977
c18487ee
YR
1978static void bnx2x__link_reset(struct bnx2x *bp)
1979{
19680c48 1980 if (!BP_NOMCP(bp)) {
4a37fb66 1981 bnx2x_acquire_phy_lock(bp);
19680c48 1982 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
4a37fb66 1983 bnx2x_release_phy_lock(bp);
19680c48
EG
1984 } else
1985 BNX2X_ERR("Bootcode is missing -not resetting link\n");
c18487ee 1986}
a2fbb9ea 1987
c18487ee
YR
1988static u8 bnx2x_link_test(struct bnx2x *bp)
1989{
1990 u8 rc;
a2fbb9ea 1991
4a37fb66 1992 bnx2x_acquire_phy_lock(bp);
c18487ee 1993 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 1994 bnx2x_release_phy_lock(bp);
a2fbb9ea 1995
c18487ee
YR
1996 return rc;
1997}
a2fbb9ea 1998
34f80b04
EG
1999/* Calculates the sum of vn_min_rates.
2000 It's needed for further normalizing of the min_rates.
2001
2002 Returns:
2003 sum of vn_min_rates
2004 or
2005 0 - if all the min_rates are 0.
2006 In the later case fainess algorithm should be deactivated.
2007 If not all min_rates are zero then those that are zeroes will
2008 be set to 1.
2009 */
2010static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2011{
2012 int i, port = BP_PORT(bp);
2013 u32 wsum = 0;
2014 int all_zero = 1;
2015
2016 for (i = 0; i < E1HVN_MAX; i++) {
2017 u32 vn_cfg =
2018 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2019 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2020 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2021 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2022 /* If min rate is zero - set it to 1 */
2023 if (!vn_min_rate)
2024 vn_min_rate = DEF_MIN_RATE;
2025 else
2026 all_zero = 0;
2027
2028 wsum += vn_min_rate;
2029 }
2030 }
2031
2032 /* ... only if all min rates are zeros - disable FAIRNESS */
2033 if (all_zero)
2034 return 0;
2035
2036 return wsum;
2037}
2038
2039static void bnx2x_init_port_minmax(struct bnx2x *bp,
2040 int en_fness,
2041 u16 port_rate,
2042 struct cmng_struct_per_port *m_cmng_port)
2043{
2044 u32 r_param = port_rate / 8;
2045 int port = BP_PORT(bp);
2046 int i;
2047
2048 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2049
2050 /* Enable minmax only if we are in e1hmf mode */
2051 if (IS_E1HMF(bp)) {
2052 u32 fair_periodic_timeout_usec;
2053 u32 t_fair;
2054
2055 /* Enable rate shaping and fairness */
2056 m_cmng_port->flags.cmng_vn_enable = 1;
2057 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2058 m_cmng_port->flags.rate_shaping_enable = 1;
2059
2060 if (!en_fness)
2061 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2062 " fairness will be disabled\n");
2063
2064 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2065 m_cmng_port->rs_vars.rs_periodic_timeout =
2066 RS_PERIODIC_TIMEOUT_USEC / 4;
2067
2068 /* this is the threshold below which no timer arming will occur
2069 1.25 coefficient is for the threshold to be a little bigger
2070 than the real time, to compensate for timer in-accuracy */
2071 m_cmng_port->rs_vars.rs_threshold =
2072 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2073
2074 /* resolution of fairness timer */
2075 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2076 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2077 t_fair = T_FAIR_COEF / port_rate;
2078
2079 /* this is the threshold below which we won't arm
2080 the timer anymore */
2081 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2082
2083 /* we multiply by 1e3/8 to get bytes/msec.
2084 We don't want the credits to pass a credit
2085 of the T_FAIR*FAIR_MEM (algorithm resolution) */
2086 m_cmng_port->fair_vars.upper_bound =
2087 r_param * t_fair * FAIR_MEM;
2088 /* since each tick is 4 usec */
2089 m_cmng_port->fair_vars.fairness_timeout =
2090 fair_periodic_timeout_usec / 4;
2091
2092 } else {
2093 /* Disable rate shaping and fairness */
2094 m_cmng_port->flags.cmng_vn_enable = 0;
2095 m_cmng_port->flags.fairness_enable = 0;
2096 m_cmng_port->flags.rate_shaping_enable = 0;
2097
2098 DP(NETIF_MSG_IFUP,
2099 "Single function mode minmax will be disabled\n");
2100 }
2101
2102 /* Store it to internal memory */
2103 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2104 REG_WR(bp, BAR_XSTRORM_INTMEM +
2105 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2106 ((u32 *)(m_cmng_port))[i]);
2107}
2108
2109static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2110 u32 wsum, u16 port_rate,
2111 struct cmng_struct_per_port *m_cmng_port)
2112{
2113 struct rate_shaping_vars_per_vn m_rs_vn;
2114 struct fairness_vars_per_vn m_fair_vn;
2115 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2116 u16 vn_min_rate, vn_max_rate;
2117 int i;
2118
2119 /* If function is hidden - set min and max to zeroes */
2120 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2121 vn_min_rate = 0;
2122 vn_max_rate = 0;
2123
2124 } else {
2125 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2126 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2127 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2128 if current min rate is zero - set it to 1.
2129 This is a requirment of the algorithm. */
2130 if ((vn_min_rate == 0) && wsum)
2131 vn_min_rate = DEF_MIN_RATE;
2132 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2133 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2134 }
2135
2136 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2137 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2138
2139 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2140 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2141
2142 /* global vn counter - maximal Mbps for this vn */
2143 m_rs_vn.vn_counter.rate = vn_max_rate;
2144
2145 /* quota - number of bytes transmitted in this period */
2146 m_rs_vn.vn_counter.quota =
2147 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2148
2149#ifdef BNX2X_PER_PROT_QOS
2150 /* per protocol counter */
2151 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2152 /* maximal Mbps for this protocol */
2153 m_rs_vn.protocol_counters[protocol].rate =
2154 protocol_max_rate[protocol];
2155 /* the quota in each timer period -
2156 number of bytes transmitted in this period */
2157 m_rs_vn.protocol_counters[protocol].quota =
2158 (u32)(rs_periodic_timeout_usec *
2159 ((double)m_rs_vn.
2160 protocol_counters[protocol].rate/8));
2161 }
2162#endif
2163
2164 if (wsum) {
2165 /* credit for each period of the fairness algorithm:
2166 number of bytes in T_FAIR (the vn share the port rate).
2167 wsum should not be larger than 10000, thus
2168 T_FAIR_COEF / (8 * wsum) will always be grater than zero */
2169 m_fair_vn.vn_credit_delta =
2170 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2171 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2172 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2173 m_fair_vn.vn_credit_delta);
2174 }
2175
2176#ifdef BNX2X_PER_PROT_QOS
2177 do {
2178 u32 protocolWeightSum = 0;
2179
2180 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2181 protocolWeightSum +=
2182 drvInit.protocol_min_rate[protocol];
2183 /* per protocol counter -
2184 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2185 if (protocolWeightSum > 0) {
2186 for (protocol = 0;
2187 protocol < NUM_OF_PROTOCOLS; protocol++)
2188 /* credit for each period of the
2189 fairness algorithm - number of bytes in
2190 T_FAIR (the protocol share the vn rate) */
2191 m_fair_vn.protocol_credit_delta[protocol] =
2192 (u32)((vn_min_rate / 8) * t_fair *
2193 protocol_min_rate / protocolWeightSum);
2194 }
2195 } while (0);
2196#endif
2197
2198 /* Store it to internal memory */
2199 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2200 REG_WR(bp, BAR_XSTRORM_INTMEM +
2201 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2202 ((u32 *)(&m_rs_vn))[i]);
2203
2204 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2205 REG_WR(bp, BAR_XSTRORM_INTMEM +
2206 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2207 ((u32 *)(&m_fair_vn))[i]);
2208}
2209
c18487ee
YR
2210/* This function is called upon link interrupt */
2211static void bnx2x_link_attn(struct bnx2x *bp)
2212{
34f80b04
EG
2213 int vn;
2214
bb2a0f7a
YG
2215 /* Make sure that we are synced with the current statistics */
2216 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2217
4a37fb66 2218 bnx2x_acquire_phy_lock(bp);
c18487ee 2219 bnx2x_link_update(&bp->link_params, &bp->link_vars);
4a37fb66 2220 bnx2x_release_phy_lock(bp);
a2fbb9ea 2221
bb2a0f7a
YG
2222 if (bp->link_vars.link_up) {
2223
2224 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2225 struct host_port_stats *pstats;
2226
2227 pstats = bnx2x_sp(bp, port_stats);
2228 /* reset old bmac stats */
2229 memset(&(pstats->mac_stx[0]), 0,
2230 sizeof(struct mac_stx));
2231 }
2232 if ((bp->state == BNX2X_STATE_OPEN) ||
2233 (bp->state == BNX2X_STATE_DISABLED))
2234 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2235 }
2236
c18487ee
YR
2237 /* indicate link status */
2238 bnx2x_link_report(bp);
34f80b04
EG
2239
2240 if (IS_E1HMF(bp)) {
2241 int func;
2242
2243 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2244 if (vn == BP_E1HVN(bp))
2245 continue;
2246
2247 func = ((vn << 1) | BP_PORT(bp));
2248
2249 /* Set the attention towards other drivers
2250 on the same port */
2251 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2252 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2253 }
2254 }
2255
2256 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2257 struct cmng_struct_per_port m_cmng_port;
2258 u32 wsum;
2259 int port = BP_PORT(bp);
2260
2261 /* Init RATE SHAPING and FAIRNESS contexts */
2262 wsum = bnx2x_calc_vn_wsum(bp);
2263 bnx2x_init_port_minmax(bp, (int)wsum,
2264 bp->link_vars.line_speed,
2265 &m_cmng_port);
2266 if (IS_E1HMF(bp))
2267 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2268 bnx2x_init_vn_minmax(bp, 2*vn + port,
2269 wsum, bp->link_vars.line_speed,
2270 &m_cmng_port);
2271 }
c18487ee 2272}
a2fbb9ea 2273
c18487ee
YR
2274static void bnx2x__link_status_update(struct bnx2x *bp)
2275{
2276 if (bp->state != BNX2X_STATE_OPEN)
2277 return;
a2fbb9ea 2278
c18487ee 2279 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2280
bb2a0f7a
YG
2281 if (bp->link_vars.link_up)
2282 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2283 else
2284 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2285
c18487ee
YR
2286 /* indicate link status */
2287 bnx2x_link_report(bp);
a2fbb9ea 2288}
a2fbb9ea 2289
34f80b04
EG
2290static void bnx2x_pmf_update(struct bnx2x *bp)
2291{
2292 int port = BP_PORT(bp);
2293 u32 val;
2294
2295 bp->port.pmf = 1;
2296 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2297
2298 /* enable nig attention */
2299 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2300 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2301 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2302
2303 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2304}
2305
c18487ee 2306/* end of Link */
a2fbb9ea
ET
2307
2308/* slow path */
2309
2310/*
2311 * General service functions
2312 */
2313
2314/* the slow path queue is odd since completions arrive on the fastpath ring */
2315static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2316 u32 data_hi, u32 data_lo, int common)
2317{
34f80b04 2318 int func = BP_FUNC(bp);
a2fbb9ea 2319
34f80b04
EG
2320 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2321 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2322 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2323 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2324 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2325
2326#ifdef BNX2X_STOP_ON_ERROR
2327 if (unlikely(bp->panic))
2328 return -EIO;
2329#endif
2330
34f80b04 2331 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2332
2333 if (!bp->spq_left) {
2334 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2335 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2336 bnx2x_panic();
2337 return -EBUSY;
2338 }
f1410647 2339
a2fbb9ea
ET
2340 /* CID needs port number to be encoded int it */
2341 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2342 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2343 HW_CID(bp, cid)));
2344 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2345 if (common)
2346 bp->spq_prod_bd->hdr.type |=
2347 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2348
2349 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2350 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2351
2352 bp->spq_left--;
2353
2354 if (bp->spq_prod_bd == bp->spq_last_bd) {
2355 bp->spq_prod_bd = bp->spq;
2356 bp->spq_prod_idx = 0;
2357 DP(NETIF_MSG_TIMER, "end of spq\n");
2358
2359 } else {
2360 bp->spq_prod_bd++;
2361 bp->spq_prod_idx++;
2362 }
2363
34f80b04 2364 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2365 bp->spq_prod_idx);
2366
34f80b04 2367 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2368 return 0;
2369}
2370
2371/* acquire split MCP access lock register */
4a37fb66 2372static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2373{
a2fbb9ea 2374 u32 i, j, val;
34f80b04 2375 int rc = 0;
a2fbb9ea
ET
2376
2377 might_sleep();
2378 i = 100;
2379 for (j = 0; j < i*10; j++) {
2380 val = (1UL << 31);
2381 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2382 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2383 if (val & (1L << 31))
2384 break;
2385
2386 msleep(5);
2387 }
a2fbb9ea 2388 if (!(val & (1L << 31))) {
19680c48 2389 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2390 rc = -EBUSY;
2391 }
2392
2393 return rc;
2394}
2395
4a37fb66
YG
2396/* release split MCP access lock register */
2397static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2398{
2399 u32 val = 0;
2400
2401 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2402}
2403
2404static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2405{
2406 struct host_def_status_block *def_sb = bp->def_status_blk;
2407 u16 rc = 0;
2408
2409 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2410 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2411 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2412 rc |= 1;
2413 }
2414 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2415 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2416 rc |= 2;
2417 }
2418 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2419 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2420 rc |= 4;
2421 }
2422 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2423 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2424 rc |= 8;
2425 }
2426 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2427 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2428 rc |= 16;
2429 }
2430 return rc;
2431}
2432
2433/*
2434 * slow path service functions
2435 */
2436
2437static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2438{
34f80b04 2439 int port = BP_PORT(bp);
5c862848
EG
2440 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2441 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2442 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2443 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2444 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2445 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2446 u32 aeu_mask;
a2fbb9ea 2447
a2fbb9ea
ET
2448 if (bp->attn_state & asserted)
2449 BNX2X_ERR("IGU ERROR\n");
2450
3fcaf2e5
EG
2451 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2452 aeu_mask = REG_RD(bp, aeu_addr);
2453
a2fbb9ea 2454 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2455 aeu_mask, asserted);
2456 aeu_mask &= ~(asserted & 0xff);
2457 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2458
3fcaf2e5
EG
2459 REG_WR(bp, aeu_addr, aeu_mask);
2460 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2461
3fcaf2e5 2462 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2463 bp->attn_state |= asserted;
3fcaf2e5 2464 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2465
2466 if (asserted & ATTN_HARD_WIRED_MASK) {
2467 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2468
877e9aa4
ET
2469 /* save nig interrupt mask */
2470 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2471 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2472
c18487ee 2473 bnx2x_link_attn(bp);
a2fbb9ea
ET
2474
2475 /* handle unicore attn? */
2476 }
2477 if (asserted & ATTN_SW_TIMER_4_FUNC)
2478 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2479
2480 if (asserted & GPIO_2_FUNC)
2481 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2482
2483 if (asserted & GPIO_3_FUNC)
2484 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2485
2486 if (asserted & GPIO_4_FUNC)
2487 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2488
2489 if (port == 0) {
2490 if (asserted & ATTN_GENERAL_ATTN_1) {
2491 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2492 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2493 }
2494 if (asserted & ATTN_GENERAL_ATTN_2) {
2495 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2496 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2497 }
2498 if (asserted & ATTN_GENERAL_ATTN_3) {
2499 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2500 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2501 }
2502 } else {
2503 if (asserted & ATTN_GENERAL_ATTN_4) {
2504 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2505 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2506 }
2507 if (asserted & ATTN_GENERAL_ATTN_5) {
2508 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2509 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2510 }
2511 if (asserted & ATTN_GENERAL_ATTN_6) {
2512 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2513 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2514 }
2515 }
2516
2517 } /* if hardwired */
2518
5c862848
EG
2519 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2520 asserted, hc_addr);
2521 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2522
2523 /* now set back the mask */
2524 if (asserted & ATTN_NIG_FOR_FUNC)
877e9aa4 2525 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
a2fbb9ea
ET
2526}
2527
877e9aa4 2528static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2529{
34f80b04 2530 int port = BP_PORT(bp);
877e9aa4
ET
2531 int reg_offset;
2532 u32 val;
2533
34f80b04
EG
2534 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2535 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2536
34f80b04 2537 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2538
2539 val = REG_RD(bp, reg_offset);
2540 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2541 REG_WR(bp, reg_offset, val);
2542
2543 BNX2X_ERR("SPIO5 hw attention\n");
2544
34f80b04 2545 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
877e9aa4
ET
2546 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2547 /* Fan failure attention */
2548
2549 /* The PHY reset is controled by GPIO 1 */
2550 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2551 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2552 /* Low power mode is controled by GPIO 2 */
2553 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2554 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2555 /* mark the failure */
c18487ee 2556 bp->link_params.ext_phy_config &=
877e9aa4 2557 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2558 bp->link_params.ext_phy_config |=
877e9aa4
ET
2559 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2560 SHMEM_WR(bp,
2561 dev_info.port_hw_config[port].
2562 external_phy_config,
c18487ee 2563 bp->link_params.ext_phy_config);
877e9aa4
ET
2564 /* log the failure */
2565 printk(KERN_ERR PFX "Fan Failure on Network"
2566 " Controller %s has caused the driver to"
2567 " shutdown the card to prevent permanent"
2568 " damage. Please contact Dell Support for"
2569 " assistance\n", bp->dev->name);
2570 break;
2571
2572 default:
2573 break;
2574 }
2575 }
34f80b04
EG
2576
2577 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2578
2579 val = REG_RD(bp, reg_offset);
2580 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2581 REG_WR(bp, reg_offset, val);
2582
2583 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2584 (attn & HW_INTERRUT_ASSERT_SET_0));
2585 bnx2x_panic();
2586 }
877e9aa4
ET
2587}
2588
2589static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2590{
2591 u32 val;
2592
2593 if (attn & BNX2X_DOORQ_ASSERT) {
2594
2595 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2596 BNX2X_ERR("DB hw attention 0x%x\n", val);
2597 /* DORQ discard attention */
2598 if (val & 0x2)
2599 BNX2X_ERR("FATAL error from DORQ\n");
2600 }
34f80b04
EG
2601
2602 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2603
2604 int port = BP_PORT(bp);
2605 int reg_offset;
2606
2607 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2608 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2609
2610 val = REG_RD(bp, reg_offset);
2611 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2612 REG_WR(bp, reg_offset, val);
2613
2614 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2615 (attn & HW_INTERRUT_ASSERT_SET_1));
2616 bnx2x_panic();
2617 }
877e9aa4
ET
2618}
2619
2620static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2621{
2622 u32 val;
2623
2624 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2625
2626 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2627 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2628 /* CFC error attention */
2629 if (val & 0x2)
2630 BNX2X_ERR("FATAL error from CFC\n");
2631 }
2632
2633 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2634
2635 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2636 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2637 /* RQ_USDMDP_FIFO_OVERFLOW */
2638 if (val & 0x18000)
2639 BNX2X_ERR("FATAL error from PXP\n");
2640 }
34f80b04
EG
2641
2642 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2643
2644 int port = BP_PORT(bp);
2645 int reg_offset;
2646
2647 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2648 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2649
2650 val = REG_RD(bp, reg_offset);
2651 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2652 REG_WR(bp, reg_offset, val);
2653
2654 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2655 (attn & HW_INTERRUT_ASSERT_SET_2));
2656 bnx2x_panic();
2657 }
877e9aa4
ET
2658}
2659
2660static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2661{
34f80b04
EG
2662 u32 val;
2663
877e9aa4
ET
2664 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2665
34f80b04
EG
2666 if (attn & BNX2X_PMF_LINK_ASSERT) {
2667 int func = BP_FUNC(bp);
2668
2669 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2670 bnx2x__link_status_update(bp);
2671 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2672 DRV_STATUS_PMF)
2673 bnx2x_pmf_update(bp);
2674
2675 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2676
2677 BNX2X_ERR("MC assert!\n");
2678 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2679 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2680 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2681 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2682 bnx2x_panic();
2683
2684 } else if (attn & BNX2X_MCP_ASSERT) {
2685
2686 BNX2X_ERR("MCP assert!\n");
2687 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2688 bnx2x_fw_dump(bp);
877e9aa4
ET
2689
2690 } else
2691 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2692 }
2693
2694 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2695 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2696 if (attn & BNX2X_GRC_TIMEOUT) {
2697 val = CHIP_IS_E1H(bp) ?
2698 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2699 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2700 }
2701 if (attn & BNX2X_GRC_RSV) {
2702 val = CHIP_IS_E1H(bp) ?
2703 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2704 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2705 }
877e9aa4 2706 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2707 }
2708}
2709
2710static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2711{
a2fbb9ea
ET
2712 struct attn_route attn;
2713 struct attn_route group_mask;
34f80b04 2714 int port = BP_PORT(bp);
877e9aa4 2715 int index;
a2fbb9ea
ET
2716 u32 reg_addr;
2717 u32 val;
3fcaf2e5 2718 u32 aeu_mask;
a2fbb9ea
ET
2719
2720 /* need to take HW lock because MCP or other port might also
2721 try to handle this event */
4a37fb66 2722 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2723
2724 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2725 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2726 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2727 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2728 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2729 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2730
2731 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2732 if (deasserted & (1 << index)) {
2733 group_mask = bp->attn_group[index];
2734
34f80b04
EG
2735 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2736 index, group_mask.sig[0], group_mask.sig[1],
2737 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2738
877e9aa4
ET
2739 bnx2x_attn_int_deasserted3(bp,
2740 attn.sig[3] & group_mask.sig[3]);
2741 bnx2x_attn_int_deasserted1(bp,
2742 attn.sig[1] & group_mask.sig[1]);
2743 bnx2x_attn_int_deasserted2(bp,
2744 attn.sig[2] & group_mask.sig[2]);
2745 bnx2x_attn_int_deasserted0(bp,
2746 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2747
a2fbb9ea
ET
2748 if ((attn.sig[0] & group_mask.sig[0] &
2749 HW_PRTY_ASSERT_SET_0) ||
2750 (attn.sig[1] & group_mask.sig[1] &
2751 HW_PRTY_ASSERT_SET_1) ||
2752 (attn.sig[2] & group_mask.sig[2] &
2753 HW_PRTY_ASSERT_SET_2))
877e9aa4 2754 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2755 }
2756 }
2757
4a37fb66 2758 bnx2x_release_alr(bp);
a2fbb9ea 2759
5c862848 2760 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2761
2762 val = ~deasserted;
3fcaf2e5
EG
2763 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2764 val, reg_addr);
5c862848 2765 REG_WR(bp, reg_addr, val);
a2fbb9ea 2766
a2fbb9ea 2767 if (~bp->attn_state & deasserted)
3fcaf2e5 2768 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2769
2770 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2771 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2772
3fcaf2e5
EG
2773 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2774 aeu_mask = REG_RD(bp, reg_addr);
2775
2776 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2777 aeu_mask, deasserted);
2778 aeu_mask |= (deasserted & 0xff);
2779 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2780
3fcaf2e5
EG
2781 REG_WR(bp, reg_addr, aeu_mask);
2782 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2783
2784 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2785 bp->attn_state &= ~deasserted;
2786 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2787}
2788
2789static void bnx2x_attn_int(struct bnx2x *bp)
2790{
2791 /* read local copy of bits */
2792 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2793 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2794 u32 attn_state = bp->attn_state;
2795
2796 /* look for changed bits */
2797 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2798 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2799
2800 DP(NETIF_MSG_HW,
2801 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2802 attn_bits, attn_ack, asserted, deasserted);
2803
2804 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2805 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2806
2807 /* handle bits that were raised */
2808 if (asserted)
2809 bnx2x_attn_int_asserted(bp, asserted);
2810
2811 if (deasserted)
2812 bnx2x_attn_int_deasserted(bp, deasserted);
2813}
2814
2815static void bnx2x_sp_task(struct work_struct *work)
2816{
2817 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2818 u16 status;
2819
34f80b04 2820
a2fbb9ea
ET
2821 /* Return here if interrupt is disabled */
2822 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
877e9aa4 2823 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2824 return;
2825 }
2826
2827 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2828/* if (status == 0) */
2829/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2830
34f80b04 2831 DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2832
877e9aa4
ET
2833 /* HW attentions */
2834 if (status & 0x1)
a2fbb9ea 2835 bnx2x_attn_int(bp);
a2fbb9ea 2836
bb2a0f7a
YG
2837 /* CStorm events: query_stats, port delete ramrod */
2838 if (status & 0x2)
2839 bp->stats_pending = 0;
2840
a2fbb9ea
ET
2841 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2842 IGU_INT_NOP, 1);
2843 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2844 IGU_INT_NOP, 1);
2845 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2846 IGU_INT_NOP, 1);
2847 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2848 IGU_INT_NOP, 1);
2849 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2850 IGU_INT_ENABLE, 1);
877e9aa4 2851
a2fbb9ea
ET
2852}
2853
2854static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2855{
2856 struct net_device *dev = dev_instance;
2857 struct bnx2x *bp = netdev_priv(dev);
2858
2859 /* Return here if interrupt is disabled */
2860 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
877e9aa4 2861 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2862 return IRQ_HANDLED;
2863 }
2864
877e9aa4 2865 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2866
2867#ifdef BNX2X_STOP_ON_ERROR
2868 if (unlikely(bp->panic))
2869 return IRQ_HANDLED;
2870#endif
2871
2872 schedule_work(&bp->sp_task);
2873
2874 return IRQ_HANDLED;
2875}
2876
2877/* end of slow path */
2878
2879/* Statistics */
2880
2881/****************************************************************************
2882* Macros
2883****************************************************************************/
2884
a2fbb9ea
ET
2885/* sum[hi:lo] += add[hi:lo] */
2886#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2887 do { \
2888 s_lo += a_lo; \
2889 s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \
2890 } while (0)
2891
2892/* difference = minuend - subtrahend */
2893#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2894 do { \
bb2a0f7a
YG
2895 if (m_lo < s_lo) { \
2896 /* underflow */ \
a2fbb9ea 2897 d_hi = m_hi - s_hi; \
bb2a0f7a
YG
2898 if (d_hi > 0) { \
2899 /* we can 'loan' 1 */ \
a2fbb9ea
ET
2900 d_hi--; \
2901 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a
YG
2902 } else { \
2903 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2904 d_hi = 0; \
2905 d_lo = 0; \
2906 } \
bb2a0f7a
YG
2907 } else { \
2908 /* m_lo >= s_lo */ \
a2fbb9ea 2909 if (m_hi < s_hi) { \
bb2a0f7a
YG
2910 d_hi = 0; \
2911 d_lo = 0; \
2912 } else { \
2913 /* m_hi >= s_hi */ \
2914 d_hi = m_hi - s_hi; \
2915 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2916 } \
2917 } \
2918 } while (0)
2919
bb2a0f7a 2920#define UPDATE_STAT64(s, t) \
a2fbb9ea 2921 do { \
bb2a0f7a
YG
2922 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2923 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2924 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2925 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2926 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2927 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2928 } while (0)
2929
bb2a0f7a 2930#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2931 do { \
bb2a0f7a
YG
2932 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2933 diff.lo, new->s##_lo, old->s##_lo); \
2934 ADD_64(estats->t##_hi, diff.hi, \
2935 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2936 } while (0)
2937
2938/* sum[hi:lo] += add */
2939#define ADD_EXTEND_64(s_hi, s_lo, a) \
2940 do { \
2941 s_lo += a; \
2942 s_hi += (s_lo < a) ? 1 : 0; \
2943 } while (0)
2944
bb2a0f7a 2945#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2946 do { \
bb2a0f7a
YG
2947 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2948 pstats->mac_stx[1].s##_lo, \
2949 new->s); \
a2fbb9ea
ET
2950 } while (0)
2951
bb2a0f7a 2952#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
2953 do { \
2954 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2955 old_tclient->s = le32_to_cpu(tclient->s); \
bb2a0f7a
YG
2956 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2957 } while (0)
2958
2959#define UPDATE_EXTEND_XSTAT(s, t) \
2960 do { \
2961 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2962 old_xclient->s = le32_to_cpu(xclient->s); \
2963 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
a2fbb9ea
ET
2964 } while (0)
2965
2966/*
2967 * General service functions
2968 */
2969
2970static inline long bnx2x_hilo(u32 *hiref)
2971{
2972 u32 lo = *(hiref + 1);
2973#if (BITS_PER_LONG == 64)
2974 u32 hi = *hiref;
2975
2976 return HILO_U64(hi, lo);
2977#else
2978 return lo;
2979#endif
2980}
2981
2982/*
2983 * Init service functions
2984 */
2985
bb2a0f7a
YG
2986static void bnx2x_storm_stats_post(struct bnx2x *bp)
2987{
2988 if (!bp->stats_pending) {
2989 struct eth_query_ramrod_data ramrod_data = {0};
2990 int rc;
2991
2992 ramrod_data.drv_counter = bp->stats_counter++;
2993 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
2994 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
2995
2996 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2997 ((u32 *)&ramrod_data)[1],
2998 ((u32 *)&ramrod_data)[0], 0);
2999 if (rc == 0) {
3000 /* stats ramrod has it's own slot on the spq */
3001 bp->spq_left++;
3002 bp->stats_pending = 1;
3003 }
3004 }
3005}
3006
3007static void bnx2x_stats_init(struct bnx2x *bp)
3008{
3009 int port = BP_PORT(bp);
3010
3011 bp->executer_idx = 0;
3012 bp->stats_counter = 0;
3013
3014 /* port stats */
3015 if (!BP_NOMCP(bp))
3016 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3017 else
3018 bp->port.port_stx = 0;
3019 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3020
3021 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3022 bp->port.old_nig_stats.brb_discard =
3023 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3024 bp->port.old_nig_stats.brb_truncate =
3025 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3026 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3027 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3028 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3029 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3030
3031 /* function stats */
3032 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3033 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3034 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3035 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3036
3037 bp->stats_state = STATS_STATE_DISABLED;
3038 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3039 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3040}
3041
3042static void bnx2x_hw_stats_post(struct bnx2x *bp)
3043{
3044 struct dmae_command *dmae = &bp->stats_dmae;
3045 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3046
3047 *stats_comp = DMAE_COMP_VAL;
3048
3049 /* loader */
3050 if (bp->executer_idx) {
3051 int loader_idx = PMF_DMAE_C(bp);
3052
3053 memset(dmae, 0, sizeof(struct dmae_command));
3054
3055 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3056 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3057 DMAE_CMD_DST_RESET |
3058#ifdef __BIG_ENDIAN
3059 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3060#else
3061 DMAE_CMD_ENDIANITY_DW_SWAP |
3062#endif
3063 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3064 DMAE_CMD_PORT_0) |
3065 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3066 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3067 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3068 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3069 sizeof(struct dmae_command) *
3070 (loader_idx + 1)) >> 2;
3071 dmae->dst_addr_hi = 0;
3072 dmae->len = sizeof(struct dmae_command) >> 2;
3073 if (CHIP_IS_E1(bp))
3074 dmae->len--;
3075 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3076 dmae->comp_addr_hi = 0;
3077 dmae->comp_val = 1;
3078
3079 *stats_comp = 0;
3080 bnx2x_post_dmae(bp, dmae, loader_idx);
3081
3082 } else if (bp->func_stx) {
3083 *stats_comp = 0;
3084 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3085 }
3086}
3087
3088static int bnx2x_stats_comp(struct bnx2x *bp)
3089{
3090 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3091 int cnt = 10;
3092
3093 might_sleep();
3094 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3095 if (!cnt) {
3096 BNX2X_ERR("timeout waiting for stats finished\n");
3097 break;
3098 }
3099 cnt--;
12469401 3100 msleep(1);
bb2a0f7a
YG
3101 }
3102 return 1;
3103}
3104
3105/*
3106 * Statistics service functions
3107 */
3108
3109static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3110{
3111 struct dmae_command *dmae;
3112 u32 opcode;
3113 int loader_idx = PMF_DMAE_C(bp);
3114 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3115
3116 /* sanity */
3117 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3118 BNX2X_ERR("BUG!\n");
3119 return;
3120 }
3121
3122 bp->executer_idx = 0;
3123
3124 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3125 DMAE_CMD_C_ENABLE |
3126 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3127#ifdef __BIG_ENDIAN
3128 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3129#else
3130 DMAE_CMD_ENDIANITY_DW_SWAP |
3131#endif
3132 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3133 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3134
3135 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3136 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3137 dmae->src_addr_lo = bp->port.port_stx >> 2;
3138 dmae->src_addr_hi = 0;
3139 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3140 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3141 dmae->len = DMAE_LEN32_RD_MAX;
3142 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3143 dmae->comp_addr_hi = 0;
3144 dmae->comp_val = 1;
3145
3146 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3147 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3148 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3149 dmae->src_addr_hi = 0;
7a9b2557
VZ
3150 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3151 DMAE_LEN32_RD_MAX * 4);
3152 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3153 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3154 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3155 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3156 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3157 dmae->comp_val = DMAE_COMP_VAL;
3158
3159 *stats_comp = 0;
3160 bnx2x_hw_stats_post(bp);
3161 bnx2x_stats_comp(bp);
3162}
3163
3164static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3165{
3166 struct dmae_command *dmae;
34f80b04 3167 int port = BP_PORT(bp);
bb2a0f7a 3168 int vn = BP_E1HVN(bp);
a2fbb9ea 3169 u32 opcode;
bb2a0f7a 3170 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3171 u32 mac_addr;
bb2a0f7a
YG
3172 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3173
3174 /* sanity */
3175 if (!bp->link_vars.link_up || !bp->port.pmf) {
3176 BNX2X_ERR("BUG!\n");
3177 return;
3178 }
a2fbb9ea
ET
3179
3180 bp->executer_idx = 0;
bb2a0f7a
YG
3181
3182 /* MCP */
3183 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3184 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3185 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3186#ifdef __BIG_ENDIAN
bb2a0f7a 3187 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3188#else
bb2a0f7a 3189 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3190#endif
bb2a0f7a
YG
3191 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3192 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3193
bb2a0f7a 3194 if (bp->port.port_stx) {
a2fbb9ea
ET
3195
3196 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3197 dmae->opcode = opcode;
bb2a0f7a
YG
3198 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3199 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3200 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3201 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3202 dmae->len = sizeof(struct host_port_stats) >> 2;
3203 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3204 dmae->comp_addr_hi = 0;
3205 dmae->comp_val = 1;
a2fbb9ea
ET
3206 }
3207
bb2a0f7a
YG
3208 if (bp->func_stx) {
3209
3210 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3211 dmae->opcode = opcode;
3212 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3213 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3214 dmae->dst_addr_lo = bp->func_stx >> 2;
3215 dmae->dst_addr_hi = 0;
3216 dmae->len = sizeof(struct host_func_stats) >> 2;
3217 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3218 dmae->comp_addr_hi = 0;
3219 dmae->comp_val = 1;
a2fbb9ea
ET
3220 }
3221
bb2a0f7a 3222 /* MAC */
a2fbb9ea
ET
3223 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3224 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3225 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3226#ifdef __BIG_ENDIAN
3227 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3228#else
3229 DMAE_CMD_ENDIANITY_DW_SWAP |
3230#endif
bb2a0f7a
YG
3231 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3232 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3233
c18487ee 3234 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3235
3236 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3237 NIG_REG_INGRESS_BMAC0_MEM);
3238
3239 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3240 BIGMAC_REGISTER_TX_STAT_GTBYT */
3241 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3242 dmae->opcode = opcode;
3243 dmae->src_addr_lo = (mac_addr +
3244 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3245 dmae->src_addr_hi = 0;
3246 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3247 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3248 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3249 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3250 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3251 dmae->comp_addr_hi = 0;
3252 dmae->comp_val = 1;
3253
3254 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3255 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3256 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3257 dmae->opcode = opcode;
3258 dmae->src_addr_lo = (mac_addr +
3259 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3260 dmae->src_addr_hi = 0;
3261 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3262 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3263 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3264 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3265 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3266 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3267 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3268 dmae->comp_addr_hi = 0;
3269 dmae->comp_val = 1;
3270
c18487ee 3271 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3272
3273 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3274
3275 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3276 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3277 dmae->opcode = opcode;
3278 dmae->src_addr_lo = (mac_addr +
3279 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3280 dmae->src_addr_hi = 0;
3281 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3282 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3283 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3284 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3285 dmae->comp_addr_hi = 0;
3286 dmae->comp_val = 1;
3287
3288 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3289 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3290 dmae->opcode = opcode;
3291 dmae->src_addr_lo = (mac_addr +
3292 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3293 dmae->src_addr_hi = 0;
3294 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3295 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3296 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3297 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3298 dmae->len = 1;
3299 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3300 dmae->comp_addr_hi = 0;
3301 dmae->comp_val = 1;
3302
3303 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3304 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3305 dmae->opcode = opcode;
3306 dmae->src_addr_lo = (mac_addr +
3307 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3308 dmae->src_addr_hi = 0;
3309 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3310 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3311 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3312 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3313 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3314 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3315 dmae->comp_addr_hi = 0;
3316 dmae->comp_val = 1;
3317 }
3318
3319 /* NIG */
bb2a0f7a
YG
3320 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3321 dmae->opcode = opcode;
3322 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3323 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3324 dmae->src_addr_hi = 0;
3325 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3326 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3327 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3328 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3329 dmae->comp_addr_hi = 0;
3330 dmae->comp_val = 1;
3331
3332 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3333 dmae->opcode = opcode;
3334 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3335 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3336 dmae->src_addr_hi = 0;
3337 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3338 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3339 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3340 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3341 dmae->len = (2*sizeof(u32)) >> 2;
3342 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3343 dmae->comp_addr_hi = 0;
3344 dmae->comp_val = 1;
3345
a2fbb9ea
ET
3346 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3347 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3348 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3349 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3350#ifdef __BIG_ENDIAN
3351 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3352#else
3353 DMAE_CMD_ENDIANITY_DW_SWAP |
3354#endif
bb2a0f7a
YG
3355 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3356 (vn << DMAE_CMD_E1HVN_SHIFT));
3357 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3358 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3359 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3360 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3361 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3362 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3363 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3364 dmae->len = (2*sizeof(u32)) >> 2;
3365 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3366 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3367 dmae->comp_val = DMAE_COMP_VAL;
3368
3369 *stats_comp = 0;
a2fbb9ea
ET
3370}
3371
bb2a0f7a 3372static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3373{
bb2a0f7a
YG
3374 struct dmae_command *dmae = &bp->stats_dmae;
3375 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3376
bb2a0f7a
YG
3377 /* sanity */
3378 if (!bp->func_stx) {
3379 BNX2X_ERR("BUG!\n");
3380 return;
3381 }
a2fbb9ea 3382
bb2a0f7a
YG
3383 bp->executer_idx = 0;
3384 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3385
bb2a0f7a
YG
3386 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3387 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3388 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3389#ifdef __BIG_ENDIAN
3390 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3391#else
3392 DMAE_CMD_ENDIANITY_DW_SWAP |
3393#endif
3394 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3395 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3396 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3397 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3398 dmae->dst_addr_lo = bp->func_stx >> 2;
3399 dmae->dst_addr_hi = 0;
3400 dmae->len = sizeof(struct host_func_stats) >> 2;
3401 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3402 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3403 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3404
bb2a0f7a
YG
3405 *stats_comp = 0;
3406}
a2fbb9ea 3407
bb2a0f7a
YG
3408static void bnx2x_stats_start(struct bnx2x *bp)
3409{
3410 if (bp->port.pmf)
3411 bnx2x_port_stats_init(bp);
3412
3413 else if (bp->func_stx)
3414 bnx2x_func_stats_init(bp);
3415
3416 bnx2x_hw_stats_post(bp);
3417 bnx2x_storm_stats_post(bp);
3418}
3419
3420static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3421{
3422 bnx2x_stats_comp(bp);
3423 bnx2x_stats_pmf_update(bp);
3424 bnx2x_stats_start(bp);
3425}
3426
3427static void bnx2x_stats_restart(struct bnx2x *bp)
3428{
3429 bnx2x_stats_comp(bp);
3430 bnx2x_stats_start(bp);
3431}
3432
3433static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3434{
3435 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3436 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3437 struct regpair diff;
3438
3439 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3440 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3441 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3442 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3443 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3444 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3445 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a
YG
3446 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3447 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3448 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3449 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3450 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3451 UPDATE_STAT64(tx_stat_gt127,
3452 tx_stat_etherstatspkts65octetsto127octets);
3453 UPDATE_STAT64(tx_stat_gt255,
3454 tx_stat_etherstatspkts128octetsto255octets);
3455 UPDATE_STAT64(tx_stat_gt511,
3456 tx_stat_etherstatspkts256octetsto511octets);
3457 UPDATE_STAT64(tx_stat_gt1023,
3458 tx_stat_etherstatspkts512octetsto1023octets);
3459 UPDATE_STAT64(tx_stat_gt1518,
3460 tx_stat_etherstatspkts1024octetsto1522octets);
3461 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3462 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3463 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3464 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3465 UPDATE_STAT64(tx_stat_gterr,
3466 tx_stat_dot3statsinternalmactransmiterrors);
3467 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3468}
3469
3470static void bnx2x_emac_stats_update(struct bnx2x *bp)
3471{
3472 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3473 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3474
3475 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3476 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3477 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3478 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3479 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3480 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3481 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3482 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3483 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3484 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3485 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3486 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3487 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3488 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3489 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3490 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3491 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3492 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3493 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3494 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3495 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3496 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3497 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3498 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3499 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3500 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3501 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3502 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3503 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3504 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3505 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3506}
3507
3508static int bnx2x_hw_stats_update(struct bnx2x *bp)
3509{
3510 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3511 struct nig_stats *old = &(bp->port.old_nig_stats);
3512 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3513 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3514 struct regpair diff;
3515
3516 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3517 bnx2x_bmac_stats_update(bp);
3518
3519 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3520 bnx2x_emac_stats_update(bp);
3521
3522 else { /* unreached */
3523 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3524 return -1;
3525 }
a2fbb9ea 3526
bb2a0f7a
YG
3527 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3528 new->brb_discard - old->brb_discard);
66e855f3
YG
3529 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3530 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3531
bb2a0f7a
YG
3532 UPDATE_STAT64_NIG(egress_mac_pkt0,
3533 etherstatspkts1024octetsto1522octets);
3534 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3535
bb2a0f7a 3536 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3537
bb2a0f7a
YG
3538 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3539 sizeof(struct mac_stx));
3540 estats->brb_drop_hi = pstats->brb_drop_hi;
3541 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3542
bb2a0f7a 3543 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3544
bb2a0f7a 3545 return 0;
a2fbb9ea
ET
3546}
3547
bb2a0f7a 3548static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3549{
3550 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a
YG
3551 int cl_id = BP_CL_ID(bp);
3552 struct tstorm_per_port_stats *tport =
3553 &stats->tstorm_common.port_statistics;
a2fbb9ea 3554 struct tstorm_per_client_stats *tclient =
bb2a0f7a 3555 &stats->tstorm_common.client_statistics[cl_id];
a2fbb9ea 3556 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
bb2a0f7a
YG
3557 struct xstorm_per_client_stats *xclient =
3558 &stats->xstorm_common.client_statistics[cl_id];
3559 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3560 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3561 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3562 u32 diff;
3563
bb2a0f7a
YG
3564 /* are storm stats valid? */
3565 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3566 bp->stats_counter) {
3567 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3568 " tstorm counter (%d) != stats_counter (%d)\n",
3569 tclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3570 return -1;
3571 }
bb2a0f7a
YG
3572 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3573 bp->stats_counter) {
3574 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3575 " xstorm counter (%d) != stats_counter (%d)\n",
3576 xclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3577 return -2;
3578 }
a2fbb9ea 3579
bb2a0f7a
YG
3580 fstats->total_bytes_received_hi =
3581 fstats->valid_bytes_received_hi =
a2fbb9ea 3582 le32_to_cpu(tclient->total_rcv_bytes.hi);
bb2a0f7a
YG
3583 fstats->total_bytes_received_lo =
3584 fstats->valid_bytes_received_lo =
a2fbb9ea 3585 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a
YG
3586
3587 estats->error_bytes_received_hi =
3588 le32_to_cpu(tclient->rcv_error_bytes.hi);
3589 estats->error_bytes_received_lo =
3590 le32_to_cpu(tclient->rcv_error_bytes.lo);
3591 ADD_64(estats->error_bytes_received_hi,
3592 estats->rx_stat_ifhcinbadoctets_hi,
3593 estats->error_bytes_received_lo,
3594 estats->rx_stat_ifhcinbadoctets_lo);
3595
3596 ADD_64(fstats->total_bytes_received_hi,
3597 estats->error_bytes_received_hi,
3598 fstats->total_bytes_received_lo,
3599 estats->error_bytes_received_lo);
3600
3601 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
a2fbb9ea 3602 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
bb2a0f7a 3603 total_multicast_packets_received);
a2fbb9ea 3604 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
bb2a0f7a
YG
3605 total_broadcast_packets_received);
3606
3607 fstats->total_bytes_transmitted_hi =
3608 le32_to_cpu(xclient->total_sent_bytes.hi);
3609 fstats->total_bytes_transmitted_lo =
3610 le32_to_cpu(xclient->total_sent_bytes.lo);
3611
3612 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3613 total_unicast_packets_transmitted);
3614 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3615 total_multicast_packets_transmitted);
3616 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3617 total_broadcast_packets_transmitted);
3618
3619 memcpy(estats, &(fstats->total_bytes_received_hi),
3620 sizeof(struct host_func_stats) - 2*sizeof(u32));
3621
3622 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3623 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3624 estats->brb_truncate_discard =
3625 le32_to_cpu(tport->brb_truncate_discard);
3626 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3627
3628 old_tclient->rcv_unicast_bytes.hi =
a2fbb9ea 3629 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
bb2a0f7a 3630 old_tclient->rcv_unicast_bytes.lo =
a2fbb9ea 3631 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
bb2a0f7a 3632 old_tclient->rcv_broadcast_bytes.hi =
a2fbb9ea 3633 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
bb2a0f7a 3634 old_tclient->rcv_broadcast_bytes.lo =
a2fbb9ea 3635 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
bb2a0f7a 3636 old_tclient->rcv_multicast_bytes.hi =
a2fbb9ea 3637 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
bb2a0f7a 3638 old_tclient->rcv_multicast_bytes.lo =
a2fbb9ea 3639 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
bb2a0f7a 3640 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
a2fbb9ea 3641
bb2a0f7a
YG
3642 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3643 old_tclient->packets_too_big_discard =
a2fbb9ea 3644 le32_to_cpu(tclient->packets_too_big_discard);
bb2a0f7a
YG
3645 estats->no_buff_discard =
3646 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3647 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3648
3649 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3650 old_xclient->unicast_bytes_sent.hi =
3651 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3652 old_xclient->unicast_bytes_sent.lo =
3653 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3654 old_xclient->multicast_bytes_sent.hi =
3655 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3656 old_xclient->multicast_bytes_sent.lo =
3657 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3658 old_xclient->broadcast_bytes_sent.hi =
3659 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3660 old_xclient->broadcast_bytes_sent.lo =
3661 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3662
3663 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea
ET
3664
3665 return 0;
3666}
3667
bb2a0f7a 3668static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3669{
bb2a0f7a
YG
3670 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3671 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3672 struct net_device_stats *nstats = &bp->dev->stats;
3673
3674 nstats->rx_packets =
3675 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3676 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3677 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3678
3679 nstats->tx_packets =
3680 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3681 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3682 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3683
bb2a0f7a 3684 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
a2fbb9ea 3685
0e39e645 3686 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3687
bb2a0f7a
YG
3688 nstats->rx_dropped = old_tclient->checksum_discard +
3689 estats->mac_discard;
a2fbb9ea
ET
3690 nstats->tx_dropped = 0;
3691
3692 nstats->multicast =
3693 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3694
bb2a0f7a
YG
3695 nstats->collisions =
3696 estats->tx_stat_dot3statssinglecollisionframes_lo +
3697 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3698 estats->tx_stat_dot3statslatecollisions_lo +
3699 estats->tx_stat_dot3statsexcessivecollisions_lo;
a2fbb9ea 3700
bb2a0f7a
YG
3701 estats->jabber_packets_received =
3702 old_tclient->packets_too_big_discard +
3703 estats->rx_stat_dot3statsframestoolong_lo;
3704
3705 nstats->rx_length_errors =
3706 estats->rx_stat_etherstatsundersizepkts_lo +
3707 estats->jabber_packets_received;
66e855f3 3708 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
bb2a0f7a
YG
3709 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3710 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3711 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
a2fbb9ea
ET
3712 nstats->rx_missed_errors = estats->xxoverflow_discard;
3713
3714 nstats->rx_errors = nstats->rx_length_errors +
3715 nstats->rx_over_errors +
3716 nstats->rx_crc_errors +
3717 nstats->rx_frame_errors +
0e39e645
ET
3718 nstats->rx_fifo_errors +
3719 nstats->rx_missed_errors;
a2fbb9ea 3720
bb2a0f7a
YG
3721 nstats->tx_aborted_errors =
3722 estats->tx_stat_dot3statslatecollisions_lo +
3723 estats->tx_stat_dot3statsexcessivecollisions_lo;
3724 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
a2fbb9ea
ET
3725 nstats->tx_fifo_errors = 0;
3726 nstats->tx_heartbeat_errors = 0;
3727 nstats->tx_window_errors = 0;
3728
3729 nstats->tx_errors = nstats->tx_aborted_errors +
3730 nstats->tx_carrier_errors;
a2fbb9ea
ET
3731}
3732
bb2a0f7a 3733static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3734{
bb2a0f7a
YG
3735 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3736 int update = 0;
a2fbb9ea 3737
bb2a0f7a
YG
3738 if (*stats_comp != DMAE_COMP_VAL)
3739 return;
3740
3741 if (bp->port.pmf)
3742 update = (bnx2x_hw_stats_update(bp) == 0);
a2fbb9ea 3743
bb2a0f7a 3744 update |= (bnx2x_storm_stats_update(bp) == 0);
a2fbb9ea 3745
bb2a0f7a
YG
3746 if (update)
3747 bnx2x_net_stats_update(bp);
a2fbb9ea 3748
bb2a0f7a
YG
3749 else {
3750 if (bp->stats_pending) {
3751 bp->stats_pending++;
3752 if (bp->stats_pending == 3) {
3753 BNX2X_ERR("stats not updated for 3 times\n");
3754 bnx2x_panic();
3755 return;
3756 }
3757 }
a2fbb9ea
ET
3758 }
3759
3760 if (bp->msglevel & NETIF_MSG_TIMER) {
bb2a0f7a
YG
3761 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3762 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3763 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3764 int i;
a2fbb9ea
ET
3765
3766 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3767 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3768 " tx pkt (%lx)\n",
3769 bnx2x_tx_avail(bp->fp),
7a9b2557 3770 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
3771 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3772 " rx pkt (%lx)\n",
7a9b2557
VZ
3773 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3774 bp->fp->rx_comp_cons),
3775 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
a2fbb9ea
ET
3776 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3777 netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
bb2a0f7a 3778 estats->driver_xoff, estats->brb_drop_lo);
a2fbb9ea
ET
3779 printk(KERN_DEBUG "tstats: checksum_discard %u "
3780 "packets_too_big_discard %u no_buff_discard %u "
3781 "mac_discard %u mac_filter_discard %u "
3782 "xxovrflow_discard %u brb_truncate_discard %u "
3783 "ttl0_discard %u\n",
bb2a0f7a
YG
3784 old_tclient->checksum_discard,
3785 old_tclient->packets_too_big_discard,
3786 old_tclient->no_buff_discard, estats->mac_discard,
a2fbb9ea 3787 estats->mac_filter_discard, estats->xxoverflow_discard,
bb2a0f7a
YG
3788 estats->brb_truncate_discard,
3789 old_tclient->ttl0_discard);
a2fbb9ea
ET
3790
3791 for_each_queue(bp, i) {
3792 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3793 bnx2x_fp(bp, i, tx_pkt),
3794 bnx2x_fp(bp, i, rx_pkt),
3795 bnx2x_fp(bp, i, rx_calls));
3796 }
3797 }
3798
bb2a0f7a
YG
3799 bnx2x_hw_stats_post(bp);
3800 bnx2x_storm_stats_post(bp);
3801}
a2fbb9ea 3802
bb2a0f7a
YG
3803static void bnx2x_port_stats_stop(struct bnx2x *bp)
3804{
3805 struct dmae_command *dmae;
3806 u32 opcode;
3807 int loader_idx = PMF_DMAE_C(bp);
3808 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3809
bb2a0f7a 3810 bp->executer_idx = 0;
a2fbb9ea 3811
bb2a0f7a
YG
3812 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3813 DMAE_CMD_C_ENABLE |
3814 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3815#ifdef __BIG_ENDIAN
bb2a0f7a 3816 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3817#else
bb2a0f7a 3818 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3819#endif
bb2a0f7a
YG
3820 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3821 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3822
3823 if (bp->port.port_stx) {
3824
3825 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3826 if (bp->func_stx)
3827 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3828 else
3829 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3830 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3831 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3832 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3833 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3834 dmae->len = sizeof(struct host_port_stats) >> 2;
3835 if (bp->func_stx) {
3836 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3837 dmae->comp_addr_hi = 0;
3838 dmae->comp_val = 1;
3839 } else {
3840 dmae->comp_addr_lo =
3841 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3842 dmae->comp_addr_hi =
3843 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3844 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3845
bb2a0f7a
YG
3846 *stats_comp = 0;
3847 }
a2fbb9ea
ET
3848 }
3849
bb2a0f7a
YG
3850 if (bp->func_stx) {
3851
3852 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3853 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3854 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3855 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3856 dmae->dst_addr_lo = bp->func_stx >> 2;
3857 dmae->dst_addr_hi = 0;
3858 dmae->len = sizeof(struct host_func_stats) >> 2;
3859 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3860 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3861 dmae->comp_val = DMAE_COMP_VAL;
3862
3863 *stats_comp = 0;
a2fbb9ea 3864 }
bb2a0f7a
YG
3865}
3866
3867static void bnx2x_stats_stop(struct bnx2x *bp)
3868{
3869 int update = 0;
3870
3871 bnx2x_stats_comp(bp);
3872
3873 if (bp->port.pmf)
3874 update = (bnx2x_hw_stats_update(bp) == 0);
3875
3876 update |= (bnx2x_storm_stats_update(bp) == 0);
3877
3878 if (update) {
3879 bnx2x_net_stats_update(bp);
a2fbb9ea 3880
bb2a0f7a
YG
3881 if (bp->port.pmf)
3882 bnx2x_port_stats_stop(bp);
3883
3884 bnx2x_hw_stats_post(bp);
3885 bnx2x_stats_comp(bp);
a2fbb9ea
ET
3886 }
3887}
3888
bb2a0f7a
YG
3889static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3890{
3891}
3892
3893static const struct {
3894 void (*action)(struct bnx2x *bp);
3895 enum bnx2x_stats_state next_state;
3896} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3897/* state event */
3898{
3899/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3900/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3901/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3902/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3903},
3904{
3905/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3906/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3907/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3908/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3909}
3910};
3911
3912static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3913{
3914 enum bnx2x_stats_state state = bp->stats_state;
3915
3916 bnx2x_stats_stm[state][event].action(bp);
3917 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3918
3919 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3920 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3921 state, event, bp->stats_state);
3922}
3923
a2fbb9ea
ET
3924static void bnx2x_timer(unsigned long data)
3925{
3926 struct bnx2x *bp = (struct bnx2x *) data;
3927
3928 if (!netif_running(bp->dev))
3929 return;
3930
3931 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3932 goto timer_restart;
a2fbb9ea
ET
3933
3934 if (poll) {
3935 struct bnx2x_fastpath *fp = &bp->fp[0];
3936 int rc;
3937
3938 bnx2x_tx_int(fp, 1000);
3939 rc = bnx2x_rx_int(fp, 1000);
3940 }
3941
34f80b04
EG
3942 if (!BP_NOMCP(bp)) {
3943 int func = BP_FUNC(bp);
a2fbb9ea
ET
3944 u32 drv_pulse;
3945 u32 mcp_pulse;
3946
3947 ++bp->fw_drv_pulse_wr_seq;
3948 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3949 /* TBD - add SYSTEM_TIME */
3950 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 3951 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 3952
34f80b04 3953 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
3954 MCP_PULSE_SEQ_MASK);
3955 /* The delta between driver pulse and mcp response
3956 * should be 1 (before mcp response) or 0 (after mcp response)
3957 */
3958 if ((drv_pulse != mcp_pulse) &&
3959 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3960 /* someone lost a heartbeat... */
3961 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3962 drv_pulse, mcp_pulse);
3963 }
3964 }
3965
bb2a0f7a
YG
3966 if ((bp->state == BNX2X_STATE_OPEN) ||
3967 (bp->state == BNX2X_STATE_DISABLED))
3968 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3969
f1410647 3970timer_restart:
a2fbb9ea
ET
3971 mod_timer(&bp->timer, jiffies + bp->current_interval);
3972}
3973
3974/* end of Statistics */
3975
3976/* nic init */
3977
3978/*
3979 * nic init service functions
3980 */
3981
34f80b04 3982static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 3983{
34f80b04
EG
3984 int port = BP_PORT(bp);
3985
3986 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3987 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 3988 sizeof(struct ustorm_status_block)/4);
34f80b04
EG
3989 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3990 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 3991 sizeof(struct cstorm_status_block)/4);
34f80b04
EG
3992}
3993
5c862848
EG
3994static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
3995 dma_addr_t mapping, int sb_id)
34f80b04
EG
3996{
3997 int port = BP_PORT(bp);
bb2a0f7a 3998 int func = BP_FUNC(bp);
a2fbb9ea 3999 int index;
34f80b04 4000 u64 section;
a2fbb9ea
ET
4001
4002 /* USTORM */
4003 section = ((u64)mapping) + offsetof(struct host_status_block,
4004 u_status_block);
34f80b04 4005 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4006
4007 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4008 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4009 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4010 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4011 U64_HI(section));
bb2a0f7a
YG
4012 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4013 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4014
4015 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4016 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4017 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4018
4019 /* CSTORM */
4020 section = ((u64)mapping) + offsetof(struct host_status_block,
4021 c_status_block);
34f80b04 4022 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4023
4024 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4025 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4026 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4027 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4028 U64_HI(section));
7a9b2557
VZ
4029 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4030 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4031
4032 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4033 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04
EG
4034 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4035
4036 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4037}
4038
4039static void bnx2x_zero_def_sb(struct bnx2x *bp)
4040{
4041 int func = BP_FUNC(bp);
a2fbb9ea 4042
34f80b04
EG
4043 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4044 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4045 sizeof(struct ustorm_def_status_block)/4);
4046 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4047 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4048 sizeof(struct cstorm_def_status_block)/4);
4049 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4050 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4051 sizeof(struct xstorm_def_status_block)/4);
4052 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4053 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4054 sizeof(struct tstorm_def_status_block)/4);
a2fbb9ea
ET
4055}
4056
4057static void bnx2x_init_def_sb(struct bnx2x *bp,
4058 struct host_def_status_block *def_sb,
34f80b04 4059 dma_addr_t mapping, int sb_id)
a2fbb9ea 4060{
34f80b04
EG
4061 int port = BP_PORT(bp);
4062 int func = BP_FUNC(bp);
a2fbb9ea
ET
4063 int index, val, reg_offset;
4064 u64 section;
4065
4066 /* ATTN */
4067 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4068 atten_status_block);
34f80b04 4069 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4070
49d66772
ET
4071 bp->attn_state = 0;
4072
a2fbb9ea
ET
4073 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4074 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4075
34f80b04 4076 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4077 bp->attn_group[index].sig[0] = REG_RD(bp,
4078 reg_offset + 0x10*index);
4079 bp->attn_group[index].sig[1] = REG_RD(bp,
4080 reg_offset + 0x4 + 0x10*index);
4081 bp->attn_group[index].sig[2] = REG_RD(bp,
4082 reg_offset + 0x8 + 0x10*index);
4083 bp->attn_group[index].sig[3] = REG_RD(bp,
4084 reg_offset + 0xc + 0x10*index);
4085 }
4086
a2fbb9ea
ET
4087 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4088 HC_REG_ATTN_MSG0_ADDR_L);
4089
4090 REG_WR(bp, reg_offset, U64_LO(section));
4091 REG_WR(bp, reg_offset + 4, U64_HI(section));
4092
4093 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4094
4095 val = REG_RD(bp, reg_offset);
34f80b04 4096 val |= sb_id;
a2fbb9ea
ET
4097 REG_WR(bp, reg_offset, val);
4098
4099 /* USTORM */
4100 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4101 u_def_status_block);
34f80b04 4102 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4103
4104 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4105 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4106 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4107 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4108 U64_HI(section));
5c862848 4109 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4110 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4111
4112 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4113 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4114 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4115
4116 /* CSTORM */
4117 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4118 c_def_status_block);
34f80b04 4119 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4120
4121 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4122 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4123 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4124 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4125 U64_HI(section));
5c862848 4126 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4127 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4128
4129 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4130 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4131 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4132
4133 /* TSTORM */
4134 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4135 t_def_status_block);
34f80b04 4136 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4137
4138 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4139 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4140 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4141 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4142 U64_HI(section));
5c862848 4143 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4144 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4145
4146 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4147 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4148 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4149
4150 /* XSTORM */
4151 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4152 x_def_status_block);
34f80b04 4153 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4154
4155 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4156 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4157 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4158 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4159 U64_HI(section));
5c862848 4160 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4161 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4162
4163 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4164 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4165 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4166
bb2a0f7a 4167 bp->stats_pending = 0;
66e855f3 4168 bp->set_mac_pending = 0;
bb2a0f7a 4169
34f80b04 4170 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4171}
4172
4173static void bnx2x_update_coalesce(struct bnx2x *bp)
4174{
34f80b04 4175 int port = BP_PORT(bp);
a2fbb9ea
ET
4176 int i;
4177
4178 for_each_queue(bp, i) {
34f80b04 4179 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4180
4181 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4182 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4183 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4184 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4185 bp->rx_ticks/12);
a2fbb9ea 4186 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4187 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848
EG
4188 U_SB_ETH_RX_CQ_INDEX),
4189 bp->rx_ticks ? 0 : 1);
4190 REG_WR16(bp, BAR_USTRORM_INTMEM +
4191 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4192 U_SB_ETH_RX_BD_INDEX),
34f80b04 4193 bp->rx_ticks ? 0 : 1);
a2fbb9ea
ET
4194
4195 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4196 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4197 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4198 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4199 bp->tx_ticks/12);
a2fbb9ea 4200 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4201 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4202 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4203 bp->tx_ticks ? 0 : 1);
a2fbb9ea
ET
4204 }
4205}
4206
7a9b2557
VZ
4207static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4208 struct bnx2x_fastpath *fp, int last)
4209{
4210 int i;
4211
4212 for (i = 0; i < last; i++) {
4213 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4214 struct sk_buff *skb = rx_buf->skb;
4215
4216 if (skb == NULL) {
4217 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4218 continue;
4219 }
4220
4221 if (fp->tpa_state[i] == BNX2X_TPA_START)
4222 pci_unmap_single(bp->pdev,
4223 pci_unmap_addr(rx_buf, mapping),
4224 bp->rx_buf_use_size,
4225 PCI_DMA_FROMDEVICE);
4226
4227 dev_kfree_skb(skb);
4228 rx_buf->skb = NULL;
4229 }
4230}
4231
a2fbb9ea
ET
4232static void bnx2x_init_rx_rings(struct bnx2x *bp)
4233{
7a9b2557 4234 int func = BP_FUNC(bp);
32626230
EG
4235 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4236 ETH_MAX_AGGREGATION_QUEUES_E1H;
4237 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4238 int i, j;
a2fbb9ea
ET
4239
4240 bp->rx_buf_use_size = bp->dev->mtu;
a2fbb9ea
ET
4241 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4242 bp->rx_buf_size = bp->rx_buf_use_size + 64;
4243
7a9b2557
VZ
4244 if (bp->flags & TPA_ENABLE_FLAG) {
4245 DP(NETIF_MSG_IFUP,
4246 "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
4247 bp->rx_buf_use_size, bp->rx_buf_size,
4248 bp->dev->mtu + ETH_OVREHEAD);
4249
4250 for_each_queue(bp, j) {
32626230 4251 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4252
32626230 4253 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4254 fp->tpa_pool[i].skb =
4255 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4256 if (!fp->tpa_pool[i].skb) {
4257 BNX2X_ERR("Failed to allocate TPA "
4258 "skb pool for queue[%d] - "
4259 "disabling TPA on this "
4260 "queue!\n", j);
4261 bnx2x_free_tpa_pool(bp, fp, i);
4262 fp->disable_tpa = 1;
4263 break;
4264 }
4265 pci_unmap_addr_set((struct sw_rx_bd *)
4266 &bp->fp->tpa_pool[i],
4267 mapping, 0);
4268 fp->tpa_state[i] = BNX2X_TPA_STOP;
4269 }
4270 }
4271 }
4272
a2fbb9ea
ET
4273 for_each_queue(bp, j) {
4274 struct bnx2x_fastpath *fp = &bp->fp[j];
4275
4276 fp->rx_bd_cons = 0;
4277 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4278 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4279
4280 /* "next page" elements initialization */
4281 /* SGE ring */
4282 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4283 struct eth_rx_sge *sge;
4284
4285 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4286 sge->addr_hi =
4287 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4288 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4289 sge->addr_lo =
4290 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4291 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4292 }
4293
4294 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4295
7a9b2557 4296 /* RX BD ring */
a2fbb9ea
ET
4297 for (i = 1; i <= NUM_RX_RINGS; i++) {
4298 struct eth_rx_bd *rx_bd;
4299
4300 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4301 rx_bd->addr_hi =
4302 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4303 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4304 rx_bd->addr_lo =
4305 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4306 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4307 }
4308
34f80b04 4309 /* CQ ring */
a2fbb9ea
ET
4310 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4311 struct eth_rx_cqe_next_page *nextpg;
4312
4313 nextpg = (struct eth_rx_cqe_next_page *)
4314 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4315 nextpg->addr_hi =
4316 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4317 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4318 nextpg->addr_lo =
4319 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4320 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4321 }
4322
7a9b2557
VZ
4323 /* Allocate SGEs and initialize the ring elements */
4324 for (i = 0, ring_prod = 0;
4325 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4326
7a9b2557
VZ
4327 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4328 BNX2X_ERR("was only able to allocate "
4329 "%d rx sges\n", i);
4330 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4331 /* Cleanup already allocated elements */
4332 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4333 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4334 fp->disable_tpa = 1;
4335 ring_prod = 0;
4336 break;
4337 }
4338 ring_prod = NEXT_SGE_IDX(ring_prod);
4339 }
4340 fp->rx_sge_prod = ring_prod;
4341
4342 /* Allocate BDs and initialize BD ring */
66e855f3 4343 fp->rx_comp_cons = 0;
7a9b2557 4344 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4345 for (i = 0; i < bp->rx_ring_size; i++) {
4346 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4347 BNX2X_ERR("was only able to allocate "
4348 "%d rx skbs\n", i);
66e855f3 4349 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
4350 break;
4351 }
4352 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4353 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4354 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
4355 }
4356
7a9b2557
VZ
4357 fp->rx_bd_prod = ring_prod;
4358 /* must not have more available CQEs than BDs */
4359 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4360 cqe_ring_prod);
a2fbb9ea
ET
4361 fp->rx_pkt = fp->rx_calls = 0;
4362
7a9b2557
VZ
4363 /* Warning!
4364 * this will generate an interrupt (to the TSTORM)
4365 * must only be done after chip is initialized
4366 */
4367 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4368 fp->rx_sge_prod);
a2fbb9ea
ET
4369 if (j != 0)
4370 continue;
4371
4372 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4373 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
4374 U64_LO(fp->rx_comp_mapping));
4375 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4376 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
4377 U64_HI(fp->rx_comp_mapping));
4378 }
4379}
4380
4381static void bnx2x_init_tx_ring(struct bnx2x *bp)
4382{
4383 int i, j;
4384
4385 for_each_queue(bp, j) {
4386 struct bnx2x_fastpath *fp = &bp->fp[j];
4387
4388 for (i = 1; i <= NUM_TX_RINGS; i++) {
4389 struct eth_tx_bd *tx_bd =
4390 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4391
4392 tx_bd->addr_hi =
4393 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4394 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4395 tx_bd->addr_lo =
4396 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4397 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4398 }
4399
4400 fp->tx_pkt_prod = 0;
4401 fp->tx_pkt_cons = 0;
4402 fp->tx_bd_prod = 0;
4403 fp->tx_bd_cons = 0;
4404 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4405 fp->tx_pkt = 0;
4406 }
4407}
4408
4409static void bnx2x_init_sp_ring(struct bnx2x *bp)
4410{
34f80b04 4411 int func = BP_FUNC(bp);
a2fbb9ea
ET
4412
4413 spin_lock_init(&bp->spq_lock);
4414
4415 bp->spq_left = MAX_SPQ_PENDING;
4416 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4417 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4418 bp->spq_prod_bd = bp->spq;
4419 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4420
34f80b04 4421 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4422 U64_LO(bp->spq_mapping));
34f80b04
EG
4423 REG_WR(bp,
4424 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4425 U64_HI(bp->spq_mapping));
4426
34f80b04 4427 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4428 bp->spq_prod_idx);
4429}
4430
4431static void bnx2x_init_context(struct bnx2x *bp)
4432{
4433 int i;
4434
4435 for_each_queue(bp, i) {
4436 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4437 struct bnx2x_fastpath *fp = &bp->fp[i];
34f80b04 4438 u8 sb_id = FP_SB_ID(fp);
a2fbb9ea
ET
4439
4440 context->xstorm_st_context.tx_bd_page_base_hi =
4441 U64_HI(fp->tx_desc_mapping);
4442 context->xstorm_st_context.tx_bd_page_base_lo =
4443 U64_LO(fp->tx_desc_mapping);
4444 context->xstorm_st_context.db_data_addr_hi =
4445 U64_HI(fp->tx_prods_mapping);
4446 context->xstorm_st_context.db_data_addr_lo =
4447 U64_LO(fp->tx_prods_mapping);
34f80b04
EG
4448 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4449 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4450
4451 context->ustorm_st_context.common.sb_index_numbers =
4452 BNX2X_RX_SB_INDEX_NUM;
4453 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4454 context->ustorm_st_context.common.status_block_id = sb_id;
4455 context->ustorm_st_context.common.flags =
4456 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4457 context->ustorm_st_context.common.mc_alignment_size = 64;
4458 context->ustorm_st_context.common.bd_buff_size =
4459 bp->rx_buf_use_size;
4460 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4461 U64_HI(fp->rx_desc_mapping);
34f80b04 4462 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4463 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4464 if (!fp->disable_tpa) {
4465 context->ustorm_st_context.common.flags |=
4466 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4467 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4468 context->ustorm_st_context.common.sge_buff_size =
4469 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4470 context->ustorm_st_context.common.sge_page_base_hi =
4471 U64_HI(fp->rx_sge_mapping);
4472 context->ustorm_st_context.common.sge_page_base_lo =
4473 U64_LO(fp->rx_sge_mapping);
4474 }
4475
a2fbb9ea 4476 context->cstorm_st_context.sb_index_number =
5c862848 4477 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4478 context->cstorm_st_context.status_block_id = sb_id;
a2fbb9ea
ET
4479
4480 context->xstorm_ag_context.cdu_reserved =
4481 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4482 CDU_REGION_NUMBER_XCM_AG,
4483 ETH_CONNECTION_TYPE);
4484 context->ustorm_ag_context.cdu_usage =
4485 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4486 CDU_REGION_NUMBER_UCM_AG,
4487 ETH_CONNECTION_TYPE);
4488 }
4489}
4490
4491static void bnx2x_init_ind_table(struct bnx2x *bp)
4492{
34f80b04 4493 int port = BP_PORT(bp);
a2fbb9ea
ET
4494 int i;
4495
4496 if (!is_multi(bp))
4497 return;
4498
34f80b04 4499 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
a2fbb9ea 4500 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04
EG
4501 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4502 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
a2fbb9ea
ET
4503 i % bp->num_queues);
4504
4505 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4506}
4507
49d66772
ET
4508static void bnx2x_set_client_config(struct bnx2x *bp)
4509{
49d66772 4510 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4511 int port = BP_PORT(bp);
4512 int i;
49d66772 4513
34f80b04 4514 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
66e855f3 4515 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
49d66772
ET
4516 tstorm_client.config_flags =
4517 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4518#ifdef BCM_VLAN
34f80b04 4519 if (bp->rx_mode && bp->vlgrp) {
49d66772
ET
4520 tstorm_client.config_flags |=
4521 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4522 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4523 }
4524#endif
49d66772 4525
7a9b2557
VZ
4526 if (bp->flags & TPA_ENABLE_FLAG) {
4527 tstorm_client.max_sges_for_packet =
4528 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4529 tstorm_client.max_sges_for_packet =
4530 ((tstorm_client.max_sges_for_packet +
4531 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4532 PAGES_PER_SGE_SHIFT;
4533
4534 tstorm_client.config_flags |=
4535 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4536 }
4537
49d66772
ET
4538 for_each_queue(bp, i) {
4539 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4540 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4541 ((u32 *)&tstorm_client)[0]);
4542 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4543 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4544 ((u32 *)&tstorm_client)[1]);
4545 }
4546
34f80b04
EG
4547 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4548 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4549}
4550
a2fbb9ea
ET
4551static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4552{
a2fbb9ea 4553 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4554 int mode = bp->rx_mode;
4555 int mask = (1 << BP_L_ID(bp));
4556 int func = BP_FUNC(bp);
a2fbb9ea
ET
4557 int i;
4558
4559 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4560
4561 switch (mode) {
4562 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4563 tstorm_mac_filter.ucast_drop_all = mask;
4564 tstorm_mac_filter.mcast_drop_all = mask;
4565 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea
ET
4566 break;
4567 case BNX2X_RX_MODE_NORMAL:
34f80b04 4568 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4569 break;
4570 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4571 tstorm_mac_filter.mcast_accept_all = mask;
4572 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4573 break;
4574 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4575 tstorm_mac_filter.ucast_accept_all = mask;
4576 tstorm_mac_filter.mcast_accept_all = mask;
4577 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4578 break;
4579 default:
34f80b04
EG
4580 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4581 break;
a2fbb9ea
ET
4582 }
4583
4584 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4585 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4586 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4587 ((u32 *)&tstorm_mac_filter)[i]);
4588
34f80b04 4589/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4590 ((u32 *)&tstorm_mac_filter)[i]); */
4591 }
a2fbb9ea 4592
49d66772
ET
4593 if (mode != BNX2X_RX_MODE_NONE)
4594 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4595}
4596
471de716
EG
4597static void bnx2x_init_internal_common(struct bnx2x *bp)
4598{
4599 int i;
4600
4601 /* Zero this manually as its initialization is
4602 currently missing in the initTool */
4603 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4604 REG_WR(bp, BAR_USTRORM_INTMEM +
4605 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4606}
4607
4608static void bnx2x_init_internal_port(struct bnx2x *bp)
4609{
4610 int port = BP_PORT(bp);
4611
4612 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4613 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4614 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4615 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4616}
4617
4618static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4619{
a2fbb9ea
ET
4620 struct tstorm_eth_function_common_config tstorm_config = {0};
4621 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4622 int port = BP_PORT(bp);
4623 int func = BP_FUNC(bp);
4624 int i;
471de716 4625 u16 max_agg_size;
a2fbb9ea
ET
4626
4627 if (is_multi(bp)) {
4628 tstorm_config.config_flags = MULTI_FLAGS;
4629 tstorm_config.rss_result_mask = MULTI_MASK;
4630 }
4631
34f80b04
EG
4632 tstorm_config.leading_client_id = BP_L_ID(bp);
4633
a2fbb9ea 4634 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4635 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4636 (*(u32 *)&tstorm_config));
4637
c14423fe 4638 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4639 bnx2x_set_storm_rx_mode(bp);
4640
66e855f3
YG
4641 /* reset xstorm per client statistics */
4642 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4643 REG_WR(bp, BAR_XSTRORM_INTMEM +
4644 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4645 i*4, 0);
4646 }
4647 /* reset tstorm per client statistics */
4648 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4649 REG_WR(bp, BAR_TSTRORM_INTMEM +
4650 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4651 i*4, 0);
4652 }
4653
4654 /* Init statistics related context */
34f80b04 4655 stats_flags.collect_eth = 1;
a2fbb9ea 4656
66e855f3 4657 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4658 ((u32 *)&stats_flags)[0]);
66e855f3 4659 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4660 ((u32 *)&stats_flags)[1]);
4661
66e855f3 4662 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4663 ((u32 *)&stats_flags)[0]);
66e855f3 4664 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4665 ((u32 *)&stats_flags)[1]);
4666
66e855f3 4667 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4668 ((u32 *)&stats_flags)[0]);
66e855f3 4669 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4670 ((u32 *)&stats_flags)[1]);
4671
66e855f3
YG
4672 REG_WR(bp, BAR_XSTRORM_INTMEM +
4673 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4674 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4675 REG_WR(bp, BAR_XSTRORM_INTMEM +
4676 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4677 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4678
4679 REG_WR(bp, BAR_TSTRORM_INTMEM +
4680 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4681 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4682 REG_WR(bp, BAR_TSTRORM_INTMEM +
4683 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4684 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04
EG
4685
4686 if (CHIP_IS_E1H(bp)) {
4687 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4688 IS_E1HMF(bp));
4689 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4690 IS_E1HMF(bp));
4691 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4692 IS_E1HMF(bp));
4693 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4694 IS_E1HMF(bp));
4695
7a9b2557
VZ
4696 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4697 bp->e1hov);
34f80b04
EG
4698 }
4699
471de716
EG
4700 /* Init CQ ring mapping and aggregation size */
4701 max_agg_size = min((u32)(bp->rx_buf_use_size +
4702 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4703 (u32)0xffff);
7a9b2557
VZ
4704 for_each_queue(bp, i) {
4705 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
4706
4707 REG_WR(bp, BAR_USTRORM_INTMEM +
4708 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4709 U64_LO(fp->rx_comp_mapping));
4710 REG_WR(bp, BAR_USTRORM_INTMEM +
4711 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4712 U64_HI(fp->rx_comp_mapping));
4713
7a9b2557
VZ
4714 REG_WR16(bp, BAR_USTRORM_INTMEM +
4715 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4716 max_agg_size);
4717 }
a2fbb9ea
ET
4718}
4719
471de716
EG
4720static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4721{
4722 switch (load_code) {
4723 case FW_MSG_CODE_DRV_LOAD_COMMON:
4724 bnx2x_init_internal_common(bp);
4725 /* no break */
4726
4727 case FW_MSG_CODE_DRV_LOAD_PORT:
4728 bnx2x_init_internal_port(bp);
4729 /* no break */
4730
4731 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4732 bnx2x_init_internal_func(bp);
4733 break;
4734
4735 default:
4736 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4737 break;
4738 }
4739}
4740
4741static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4742{
4743 int i;
4744
4745 for_each_queue(bp, i) {
4746 struct bnx2x_fastpath *fp = &bp->fp[i];
4747
34f80b04 4748 fp->bp = bp;
a2fbb9ea 4749 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4750 fp->index = i;
34f80b04
EG
4751 fp->cl_id = BP_L_ID(bp) + i;
4752 fp->sb_id = fp->cl_id;
4753 DP(NETIF_MSG_IFUP,
4754 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4755 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5c862848
EG
4756 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4757 FP_SB_ID(fp));
4758 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
4759 }
4760
5c862848
EG
4761 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4762 DEF_SB_ID);
4763 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
4764 bnx2x_update_coalesce(bp);
4765 bnx2x_init_rx_rings(bp);
4766 bnx2x_init_tx_ring(bp);
4767 bnx2x_init_sp_ring(bp);
4768 bnx2x_init_context(bp);
471de716 4769 bnx2x_init_internal(bp, load_code);
a2fbb9ea 4770 bnx2x_init_ind_table(bp);
615f8fd9 4771 bnx2x_int_enable(bp);
a2fbb9ea
ET
4772}
4773
4774/* end of nic init */
4775
4776/*
4777 * gzip service functions
4778 */
4779
4780static int bnx2x_gunzip_init(struct bnx2x *bp)
4781{
4782 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4783 &bp->gunzip_mapping);
4784 if (bp->gunzip_buf == NULL)
4785 goto gunzip_nomem1;
4786
4787 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4788 if (bp->strm == NULL)
4789 goto gunzip_nomem2;
4790
4791 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4792 GFP_KERNEL);
4793 if (bp->strm->workspace == NULL)
4794 goto gunzip_nomem3;
4795
4796 return 0;
4797
4798gunzip_nomem3:
4799 kfree(bp->strm);
4800 bp->strm = NULL;
4801
4802gunzip_nomem2:
4803 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4804 bp->gunzip_mapping);
4805 bp->gunzip_buf = NULL;
4806
4807gunzip_nomem1:
4808 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 4809 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
4810 return -ENOMEM;
4811}
4812
4813static void bnx2x_gunzip_end(struct bnx2x *bp)
4814{
4815 kfree(bp->strm->workspace);
4816
4817 kfree(bp->strm);
4818 bp->strm = NULL;
4819
4820 if (bp->gunzip_buf) {
4821 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4822 bp->gunzip_mapping);
4823 bp->gunzip_buf = NULL;
4824 }
4825}
4826
4827static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4828{
4829 int n, rc;
4830
4831 /* check gzip header */
4832 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4833 return -EINVAL;
4834
4835 n = 10;
4836
34f80b04 4837#define FNAME 0x8
a2fbb9ea
ET
4838
4839 if (zbuf[3] & FNAME)
4840 while ((zbuf[n++] != 0) && (n < len));
4841
4842 bp->strm->next_in = zbuf + n;
4843 bp->strm->avail_in = len - n;
4844 bp->strm->next_out = bp->gunzip_buf;
4845 bp->strm->avail_out = FW_BUF_SIZE;
4846
4847 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4848 if (rc != Z_OK)
4849 return rc;
4850
4851 rc = zlib_inflate(bp->strm, Z_FINISH);
4852 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4853 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4854 bp->dev->name, bp->strm->msg);
4855
4856 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4857 if (bp->gunzip_outlen & 0x3)
4858 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4859 " gunzip_outlen (%d) not aligned\n",
4860 bp->dev->name, bp->gunzip_outlen);
4861 bp->gunzip_outlen >>= 2;
4862
4863 zlib_inflateEnd(bp->strm);
4864
4865 if (rc == Z_STREAM_END)
4866 return 0;
4867
4868 return rc;
4869}
4870
4871/* nic load/unload */
4872
4873/*
34f80b04 4874 * General service functions
a2fbb9ea
ET
4875 */
4876
4877/* send a NIG loopback debug packet */
4878static void bnx2x_lb_pckt(struct bnx2x *bp)
4879{
a2fbb9ea 4880 u32 wb_write[3];
a2fbb9ea
ET
4881
4882 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4883 wb_write[0] = 0x55555555;
4884 wb_write[1] = 0x55555555;
34f80b04 4885 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4886 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4887
4888 /* NON-IP protocol */
a2fbb9ea
ET
4889 wb_write[0] = 0x09000000;
4890 wb_write[1] = 0x55555555;
34f80b04 4891 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4892 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4893}
4894
4895/* some of the internal memories
4896 * are not directly readable from the driver
4897 * to test them we send debug packets
4898 */
4899static int bnx2x_int_mem_test(struct bnx2x *bp)
4900{
4901 int factor;
4902 int count, i;
4903 u32 val = 0;
4904
ad8d3948 4905 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4906 factor = 120;
ad8d3948
EG
4907 else if (CHIP_REV_IS_EMUL(bp))
4908 factor = 200;
4909 else
a2fbb9ea 4910 factor = 1;
a2fbb9ea
ET
4911
4912 DP(NETIF_MSG_HW, "start part1\n");
4913
4914 /* Disable inputs of parser neighbor blocks */
4915 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4916 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4917 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4918 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4919
4920 /* Write 0 to parser credits for CFC search request */
4921 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4922
4923 /* send Ethernet packet */
4924 bnx2x_lb_pckt(bp);
4925
4926 /* TODO do i reset NIG statistic? */
4927 /* Wait until NIG register shows 1 packet of size 0x10 */
4928 count = 1000 * factor;
4929 while (count) {
34f80b04 4930
a2fbb9ea
ET
4931 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4932 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4933 if (val == 0x10)
4934 break;
4935
4936 msleep(10);
4937 count--;
4938 }
4939 if (val != 0x10) {
4940 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4941 return -1;
4942 }
4943
4944 /* Wait until PRS register shows 1 packet */
4945 count = 1000 * factor;
4946 while (count) {
4947 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4948 if (val == 1)
4949 break;
4950
4951 msleep(10);
4952 count--;
4953 }
4954 if (val != 0x1) {
4955 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4956 return -2;
4957 }
4958
4959 /* Reset and init BRB, PRS */
34f80b04 4960 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4961 msleep(50);
34f80b04 4962 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
4963 msleep(50);
4964 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4965 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4966
4967 DP(NETIF_MSG_HW, "part2\n");
4968
4969 /* Disable inputs of parser neighbor blocks */
4970 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4971 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4972 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4973 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4974
4975 /* Write 0 to parser credits for CFC search request */
4976 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4977
4978 /* send 10 Ethernet packets */
4979 for (i = 0; i < 10; i++)
4980 bnx2x_lb_pckt(bp);
4981
4982 /* Wait until NIG register shows 10 + 1
4983 packets of size 11*0x10 = 0xb0 */
4984 count = 1000 * factor;
4985 while (count) {
34f80b04 4986
a2fbb9ea
ET
4987 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4988 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4989 if (val == 0xb0)
4990 break;
4991
4992 msleep(10);
4993 count--;
4994 }
4995 if (val != 0xb0) {
4996 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4997 return -3;
4998 }
4999
5000 /* Wait until PRS register shows 2 packets */
5001 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5002 if (val != 2)
5003 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5004
5005 /* Write 1 to parser credits for CFC search request */
5006 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5007
5008 /* Wait until PRS register shows 3 packets */
5009 msleep(10 * factor);
5010 /* Wait until NIG register shows 1 packet of size 0x10 */
5011 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5012 if (val != 3)
5013 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5014
5015 /* clear NIG EOP FIFO */
5016 for (i = 0; i < 11; i++)
5017 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5018 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5019 if (val != 1) {
5020 BNX2X_ERR("clear of NIG failed\n");
5021 return -4;
5022 }
5023
5024 /* Reset and init BRB, PRS, NIG */
5025 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5026 msleep(50);
5027 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5028 msleep(50);
5029 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5030 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5031#ifndef BCM_ISCSI
5032 /* set NIC mode */
5033 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5034#endif
5035
5036 /* Enable inputs of parser neighbor blocks */
5037 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5038 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5039 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5040 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5041
5042 DP(NETIF_MSG_HW, "done\n");
5043
5044 return 0; /* OK */
5045}
5046
5047static void enable_blocks_attention(struct bnx2x *bp)
5048{
5049 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5050 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5051 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5052 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5053 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5054 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5055 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5056 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5057 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5058/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5059/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5060 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5061 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5062 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5063/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5064/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5065 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5066 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5067 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5068 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5069/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5070/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5071 if (CHIP_REV_IS_FPGA(bp))
5072 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5073 else
5074 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5075 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5076 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5077 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5078/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5079/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5080 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5081 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5082/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5083 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5084}
5085
34f80b04
EG
5086
5087static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5088{
a2fbb9ea 5089 u32 val, i;
a2fbb9ea 5090
34f80b04 5091 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5092
34f80b04
EG
5093 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5094 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5095
34f80b04
EG
5096 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5097 if (CHIP_IS_E1H(bp))
5098 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5099
34f80b04
EG
5100 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5101 msleep(30);
5102 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5103
34f80b04
EG
5104 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5105 if (CHIP_IS_E1(bp)) {
5106 /* enable HW interrupt from PXP on USDM overflow
5107 bit 16 on INT_MASK_0 */
5108 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5109 }
a2fbb9ea 5110
34f80b04
EG
5111 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5112 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5113
5114#ifdef __BIG_ENDIAN
34f80b04
EG
5115 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5116 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5117 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5118 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5119 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5120 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5121
5122/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5123 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5124 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5125 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5126 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5127#endif
5128
5129#ifndef BCM_ISCSI
5130 /* set NIC mode */
5131 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5132#endif
5133
34f80b04 5134 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5135#ifdef BCM_ISCSI
34f80b04
EG
5136 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5137 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5138 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5139#endif
5140
34f80b04
EG
5141 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5142 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5143
34f80b04
EG
5144 /* let the HW do it's magic ... */
5145 msleep(100);
5146 /* finish PXP init */
5147 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5148 if (val != 1) {
5149 BNX2X_ERR("PXP2 CFG failed\n");
5150 return -EBUSY;
5151 }
5152 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5153 if (val != 1) {
5154 BNX2X_ERR("PXP2 RD_INIT failed\n");
5155 return -EBUSY;
5156 }
a2fbb9ea 5157
34f80b04
EG
5158 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5159 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5160
34f80b04 5161 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5162
34f80b04
EG
5163 /* clean the DMAE memory */
5164 bp->dmae_ready = 1;
5165 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5166
34f80b04
EG
5167 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5168 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5169 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5170 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5171
34f80b04
EG
5172 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5173 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5174 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5175 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5176
5177 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5178 /* soft reset pulse */
5179 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5180 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5181
5182#ifdef BCM_ISCSI
34f80b04 5183 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5184#endif
a2fbb9ea 5185
34f80b04
EG
5186 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5187 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5188 if (!CHIP_REV_IS_SLOW(bp)) {
5189 /* enable hw interrupt from doorbell Q */
5190 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5191 }
a2fbb9ea 5192
34f80b04
EG
5193 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5194 if (CHIP_REV_IS_SLOW(bp)) {
5195 /* fix for emulation and FPGA for no pause */
5196 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5197 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5198 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5199 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5200 }
a2fbb9ea 5201
34f80b04
EG
5202 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5203 if (CHIP_IS_E1H(bp))
5204 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5205
34f80b04
EG
5206 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5207 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5208 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5209 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5210
34f80b04
EG
5211 if (CHIP_IS_E1H(bp)) {
5212 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5213 STORM_INTMEM_SIZE_E1H/2);
5214 bnx2x_init_fill(bp,
5215 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5216 0, STORM_INTMEM_SIZE_E1H/2);
5217 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5218 STORM_INTMEM_SIZE_E1H/2);
5219 bnx2x_init_fill(bp,
5220 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5221 0, STORM_INTMEM_SIZE_E1H/2);
5222 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5223 STORM_INTMEM_SIZE_E1H/2);
5224 bnx2x_init_fill(bp,
5225 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5226 0, STORM_INTMEM_SIZE_E1H/2);
5227 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5228 STORM_INTMEM_SIZE_E1H/2);
5229 bnx2x_init_fill(bp,
5230 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5231 0, STORM_INTMEM_SIZE_E1H/2);
5232 } else { /* E1 */
ad8d3948
EG
5233 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5234 STORM_INTMEM_SIZE_E1);
5235 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5236 STORM_INTMEM_SIZE_E1);
5237 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5238 STORM_INTMEM_SIZE_E1);
5239 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5240 STORM_INTMEM_SIZE_E1);
34f80b04 5241 }
a2fbb9ea 5242
34f80b04
EG
5243 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5244 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5245 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5246 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5247
34f80b04
EG
5248 /* sync semi rtc */
5249 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5250 0x80000000);
5251 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5252 0x80000000);
a2fbb9ea 5253
34f80b04
EG
5254 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5255 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5256 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5257
34f80b04
EG
5258 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5259 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5260 REG_WR(bp, i, 0xc0cac01a);
5261 /* TODO: replace with something meaningful */
5262 }
5263 if (CHIP_IS_E1H(bp))
5264 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5265 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5266
34f80b04
EG
5267 if (sizeof(union cdu_context) != 1024)
5268 /* we currently assume that a context is 1024 bytes */
5269 printk(KERN_ALERT PFX "please adjust the size of"
5270 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5271
34f80b04
EG
5272 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5273 val = (4 << 24) + (0 << 12) + 1024;
5274 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5275 if (CHIP_IS_E1(bp)) {
5276 /* !!! fix pxp client crdit until excel update */
5277 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5278 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5279 }
a2fbb9ea 5280
34f80b04
EG
5281 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5282 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
a2fbb9ea 5283
34f80b04
EG
5284 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5285 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5286
34f80b04
EG
5287 /* PXPCS COMMON comes here */
5288 /* Reset PCIE errors for debug */
5289 REG_WR(bp, 0x2814, 0xffffffff);
5290 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5291
34f80b04
EG
5292 /* EMAC0 COMMON comes here */
5293 /* EMAC1 COMMON comes here */
5294 /* DBU COMMON comes here */
5295 /* DBG COMMON comes here */
5296
5297 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5298 if (CHIP_IS_E1H(bp)) {
5299 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5300 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5301 }
5302
5303 if (CHIP_REV_IS_SLOW(bp))
5304 msleep(200);
5305
5306 /* finish CFC init */
5307 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5308 if (val != 1) {
5309 BNX2X_ERR("CFC LL_INIT failed\n");
5310 return -EBUSY;
5311 }
5312 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5313 if (val != 1) {
5314 BNX2X_ERR("CFC AC_INIT failed\n");
5315 return -EBUSY;
5316 }
5317 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5318 if (val != 1) {
5319 BNX2X_ERR("CFC CAM_INIT failed\n");
5320 return -EBUSY;
5321 }
5322 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5323
34f80b04
EG
5324 /* read NIG statistic
5325 to see if this is our first up since powerup */
5326 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5327 val = *bnx2x_sp(bp, wb_data[0]);
5328
5329 /* do internal memory self test */
5330 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5331 BNX2X_ERR("internal mem self test failed\n");
5332 return -EBUSY;
5333 }
5334
5335 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5336 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5337 /* Fan failure is indicated by SPIO 5 */
5338 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5339 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5340
5341 /* set to active low mode */
5342 val = REG_RD(bp, MISC_REG_SPIO_INT);
5343 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5344 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5345 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5346
34f80b04
EG
5347 /* enable interrupt to signal the IGU */
5348 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5349 val |= (1 << MISC_REGISTERS_SPIO_5);
5350 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5351 break;
f1410647 5352
34f80b04
EG
5353 default:
5354 break;
5355 }
f1410647 5356
34f80b04
EG
5357 /* clear PXP2 attentions */
5358 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5359
34f80b04 5360 enable_blocks_attention(bp);
a2fbb9ea 5361
7a9b2557
VZ
5362 if (bp->flags & TPA_ENABLE_FLAG) {
5363 struct tstorm_eth_tpa_exist tmp = {0};
5364
5365 tmp.tpa_exist = 1;
5366
5367 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5368 ((u32 *)&tmp)[0]);
5369 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5370 ((u32 *)&tmp)[1]);
5371 }
5372
34f80b04
EG
5373 return 0;
5374}
a2fbb9ea 5375
34f80b04
EG
5376static int bnx2x_init_port(struct bnx2x *bp)
5377{
5378 int port = BP_PORT(bp);
5379 u32 val;
a2fbb9ea 5380
34f80b04
EG
5381 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5382
5383 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5384
5385 /* Port PXP comes here */
5386 /* Port PXP2 comes here */
a2fbb9ea
ET
5387#ifdef BCM_ISCSI
5388 /* Port0 1
5389 * Port1 385 */
5390 i++;
5391 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5392 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5393 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5394 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5395
5396 /* Port0 2
5397 * Port1 386 */
5398 i++;
5399 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5400 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5401 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5402 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5403
5404 /* Port0 3
5405 * Port1 387 */
5406 i++;
5407 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5408 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5409 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5410 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5411#endif
34f80b04 5412 /* Port CMs come here */
a2fbb9ea
ET
5413
5414 /* Port QM comes here */
a2fbb9ea
ET
5415#ifdef BCM_ISCSI
5416 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5417 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5418
5419 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5420 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5421#endif
5422 /* Port DQ comes here */
5423 /* Port BRB1 comes here */
ad8d3948 5424 /* Port PRS comes here */
a2fbb9ea
ET
5425 /* Port TSDM comes here */
5426 /* Port CSDM comes here */
5427 /* Port USDM comes here */
5428 /* Port XSDM comes here */
34f80b04
EG
5429 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5430 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5431 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5432 port ? USEM_PORT1_END : USEM_PORT0_END);
5433 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5434 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5435 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5436 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5437 /* Port UPB comes here */
34f80b04
EG
5438 /* Port XPB comes here */
5439
5440 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5441 port ? PBF_PORT1_END : PBF_PORT0_END);
a2fbb9ea
ET
5442
5443 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5444 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5445
5446 /* update threshold */
34f80b04 5447 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5448 /* update init credit */
34f80b04 5449 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
5450
5451 /* probe changes */
34f80b04 5452 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5453 msleep(5);
34f80b04 5454 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5455
5456#ifdef BCM_ISCSI
5457 /* tell the searcher where the T2 table is */
5458 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5459
5460 wb_write[0] = U64_LO(bp->t2_mapping);
5461 wb_write[1] = U64_HI(bp->t2_mapping);
5462 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5463 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5464 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5465 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5466
5467 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5468 /* Port SRCH comes here */
5469#endif
5470 /* Port CDU comes here */
5471 /* Port CFC comes here */
34f80b04
EG
5472
5473 if (CHIP_IS_E1(bp)) {
5474 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5475 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5476 }
5477 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5478 port ? HC_PORT1_END : HC_PORT0_END);
5479
5480 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5481 MISC_AEU_PORT0_START,
34f80b04
EG
5482 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5483 /* init aeu_mask_attn_func_0/1:
5484 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5485 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5486 * bits 4-7 are used for "per vn group attention" */
5487 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5488 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5489
a2fbb9ea
ET
5490 /* Port PXPCS comes here */
5491 /* Port EMAC0 comes here */
5492 /* Port EMAC1 comes here */
5493 /* Port DBU comes here */
5494 /* Port DBG comes here */
34f80b04
EG
5495 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5496 port ? NIG_PORT1_END : NIG_PORT0_END);
5497
5498 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5499
5500 if (CHIP_IS_E1H(bp)) {
5501 u32 wsum;
5502 struct cmng_struct_per_port m_cmng_port;
5503 int vn;
5504
5505 /* 0x2 disable e1hov, 0x1 enable */
5506 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5507 (IS_E1HMF(bp) ? 0x1 : 0x2));
5508
5509 /* Init RATE SHAPING and FAIRNESS contexts.
5510 Initialize as if there is 10G link. */
5511 wsum = bnx2x_calc_vn_wsum(bp);
5512 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5513 if (IS_E1HMF(bp))
5514 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5515 bnx2x_init_vn_minmax(bp, 2*vn + port,
5516 wsum, 10000, &m_cmng_port);
5517 }
5518
a2fbb9ea
ET
5519 /* Port MCP comes here */
5520 /* Port DMAE comes here */
5521
34f80b04 5522 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
f1410647
ET
5523 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5524 /* add SPIO 5 to group 0 */
5525 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5526 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5527 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5528 break;
5529
5530 default:
5531 break;
5532 }
5533
c18487ee 5534 bnx2x__link_reset(bp);
a2fbb9ea 5535
34f80b04
EG
5536 return 0;
5537}
5538
5539#define ILT_PER_FUNC (768/2)
5540#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5541/* the phys address is shifted right 12 bits and has an added
5542 1=valid bit added to the 53rd bit
5543 then since this is a wide register(TM)
5544 we split it into two 32 bit writes
5545 */
5546#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5547#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5548#define PXP_ONE_ILT(x) (((x) << 10) | x)
5549#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5550
5551#define CNIC_ILT_LINES 0
5552
5553static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5554{
5555 int reg;
5556
5557 if (CHIP_IS_E1H(bp))
5558 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5559 else /* E1 */
5560 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5561
5562 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5563}
5564
5565static int bnx2x_init_func(struct bnx2x *bp)
5566{
5567 int port = BP_PORT(bp);
5568 int func = BP_FUNC(bp);
5569 int i;
5570
5571 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5572
5573 i = FUNC_ILT_BASE(func);
5574
5575 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5576 if (CHIP_IS_E1H(bp)) {
5577 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5578 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5579 } else /* E1 */
5580 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5581 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5582
5583
5584 if (CHIP_IS_E1H(bp)) {
5585 for (i = 0; i < 9; i++)
5586 bnx2x_init_block(bp,
5587 cm_start[func][i], cm_end[func][i]);
5588
5589 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5590 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5591 }
5592
5593 /* HC init per function */
5594 if (CHIP_IS_E1H(bp)) {
5595 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5596
5597 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5598 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5599 }
5600 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5601
5602 if (CHIP_IS_E1H(bp))
5603 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5604
c14423fe 5605 /* Reset PCIE errors for debug */
a2fbb9ea
ET
5606 REG_WR(bp, 0x2114, 0xffffffff);
5607 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5608
34f80b04
EG
5609 return 0;
5610}
5611
5612static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5613{
5614 int i, rc = 0;
a2fbb9ea 5615
34f80b04
EG
5616 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5617 BP_FUNC(bp), load_code);
a2fbb9ea 5618
34f80b04
EG
5619 bp->dmae_ready = 0;
5620 mutex_init(&bp->dmae_mutex);
5621 bnx2x_gunzip_init(bp);
a2fbb9ea 5622
34f80b04
EG
5623 switch (load_code) {
5624 case FW_MSG_CODE_DRV_LOAD_COMMON:
5625 rc = bnx2x_init_common(bp);
5626 if (rc)
5627 goto init_hw_err;
5628 /* no break */
5629
5630 case FW_MSG_CODE_DRV_LOAD_PORT:
5631 bp->dmae_ready = 1;
5632 rc = bnx2x_init_port(bp);
5633 if (rc)
5634 goto init_hw_err;
5635 /* no break */
5636
5637 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5638 bp->dmae_ready = 1;
5639 rc = bnx2x_init_func(bp);
5640 if (rc)
5641 goto init_hw_err;
5642 break;
5643
5644 default:
5645 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5646 break;
5647 }
5648
5649 if (!BP_NOMCP(bp)) {
5650 int func = BP_FUNC(bp);
a2fbb9ea
ET
5651
5652 bp->fw_drv_pulse_wr_seq =
34f80b04 5653 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5654 DRV_PULSE_SEQ_MASK);
34f80b04
EG
5655 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5656 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5657 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5658 } else
5659 bp->func_stx = 0;
a2fbb9ea 5660
34f80b04
EG
5661 /* this needs to be done before gunzip end */
5662 bnx2x_zero_def_sb(bp);
5663 for_each_queue(bp, i)
5664 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5665
5666init_hw_err:
5667 bnx2x_gunzip_end(bp);
5668
5669 return rc;
a2fbb9ea
ET
5670}
5671
c14423fe 5672/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
5673static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5674{
34f80b04 5675 int func = BP_FUNC(bp);
f1410647
ET
5676 u32 seq = ++bp->fw_seq;
5677 u32 rc = 0;
19680c48
EG
5678 u32 cnt = 1;
5679 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5680
34f80b04 5681 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5682 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5683
19680c48
EG
5684 do {
5685 /* let the FW do it's magic ... */
5686 msleep(delay);
a2fbb9ea 5687
19680c48 5688 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5689
19680c48
EG
5690 /* Give the FW up to 2 second (200*10ms) */
5691 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5692
5693 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5694 cnt*delay, rc, seq);
a2fbb9ea
ET
5695
5696 /* is this a reply to our command? */
5697 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5698 rc &= FW_MSG_CODE_MASK;
f1410647 5699
a2fbb9ea
ET
5700 } else {
5701 /* FW BUG! */
5702 BNX2X_ERR("FW failed to respond!\n");
5703 bnx2x_fw_dump(bp);
5704 rc = 0;
5705 }
f1410647 5706
a2fbb9ea
ET
5707 return rc;
5708}
5709
5710static void bnx2x_free_mem(struct bnx2x *bp)
5711{
5712
5713#define BNX2X_PCI_FREE(x, y, size) \
5714 do { \
5715 if (x) { \
5716 pci_free_consistent(bp->pdev, size, x, y); \
5717 x = NULL; \
5718 y = 0; \
5719 } \
5720 } while (0)
5721
5722#define BNX2X_FREE(x) \
5723 do { \
5724 if (x) { \
5725 vfree(x); \
5726 x = NULL; \
5727 } \
5728 } while (0)
5729
5730 int i;
5731
5732 /* fastpath */
5733 for_each_queue(bp, i) {
5734
5735 /* Status blocks */
5736 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5737 bnx2x_fp(bp, i, status_blk_mapping),
5738 sizeof(struct host_status_block) +
5739 sizeof(struct eth_tx_db_data));
5740
5741 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5742 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5743 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5744 bnx2x_fp(bp, i, tx_desc_mapping),
5745 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5746
5747 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5748 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5749 bnx2x_fp(bp, i, rx_desc_mapping),
5750 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5751
5752 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5753 bnx2x_fp(bp, i, rx_comp_mapping),
5754 sizeof(struct eth_fast_path_rx_cqe) *
5755 NUM_RCQ_BD);
a2fbb9ea 5756
7a9b2557 5757 /* SGE ring */
32626230 5758 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
5759 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5760 bnx2x_fp(bp, i, rx_sge_mapping),
5761 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5762 }
a2fbb9ea
ET
5763 /* end of fastpath */
5764
5765 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5766 sizeof(struct host_def_status_block));
a2fbb9ea
ET
5767
5768 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5769 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
5770
5771#ifdef BCM_ISCSI
5772 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5773 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5774 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5775 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5776#endif
7a9b2557 5777 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
5778
5779#undef BNX2X_PCI_FREE
5780#undef BNX2X_KFREE
5781}
5782
5783static int bnx2x_alloc_mem(struct bnx2x *bp)
5784{
5785
5786#define BNX2X_PCI_ALLOC(x, y, size) \
5787 do { \
5788 x = pci_alloc_consistent(bp->pdev, size, y); \
5789 if (x == NULL) \
5790 goto alloc_mem_err; \
5791 memset(x, 0, size); \
5792 } while (0)
5793
5794#define BNX2X_ALLOC(x, size) \
5795 do { \
5796 x = vmalloc(size); \
5797 if (x == NULL) \
5798 goto alloc_mem_err; \
5799 memset(x, 0, size); \
5800 } while (0)
5801
5802 int i;
5803
5804 /* fastpath */
a2fbb9ea
ET
5805 for_each_queue(bp, i) {
5806 bnx2x_fp(bp, i, bp) = bp;
5807
5808 /* Status blocks */
5809 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5810 &bnx2x_fp(bp, i, status_blk_mapping),
5811 sizeof(struct host_status_block) +
5812 sizeof(struct eth_tx_db_data));
5813
5814 bnx2x_fp(bp, i, hw_tx_prods) =
5815 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5816
5817 bnx2x_fp(bp, i, tx_prods_mapping) =
5818 bnx2x_fp(bp, i, status_blk_mapping) +
5819 sizeof(struct host_status_block);
5820
5821 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5822 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5823 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5824 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5825 &bnx2x_fp(bp, i, tx_desc_mapping),
5826 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5827
5828 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5829 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5830 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5831 &bnx2x_fp(bp, i, rx_desc_mapping),
5832 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5833
5834 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5835 &bnx2x_fp(bp, i, rx_comp_mapping),
5836 sizeof(struct eth_fast_path_rx_cqe) *
5837 NUM_RCQ_BD);
5838
7a9b2557
VZ
5839 /* SGE ring */
5840 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5841 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5842 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5843 &bnx2x_fp(bp, i, rx_sge_mapping),
5844 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea
ET
5845 }
5846 /* end of fastpath */
5847
5848 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5849 sizeof(struct host_def_status_block));
5850
5851 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5852 sizeof(struct bnx2x_slowpath));
5853
5854#ifdef BCM_ISCSI
5855 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5856
5857 /* Initialize T1 */
5858 for (i = 0; i < 64*1024; i += 64) {
5859 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5860 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5861 }
5862
5863 /* allocate searcher T2 table
5864 we allocate 1/4 of alloc num for T2
5865 (which is not entered into the ILT) */
5866 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5867
5868 /* Initialize T2 */
5869 for (i = 0; i < 16*1024; i += 64)
5870 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5871
c14423fe 5872 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
5873 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5874
5875 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5876 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5877
5878 /* QM queues (128*MAX_CONN) */
5879 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5880#endif
5881
5882 /* Slow path ring */
5883 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5884
5885 return 0;
5886
5887alloc_mem_err:
5888 bnx2x_free_mem(bp);
5889 return -ENOMEM;
5890
5891#undef BNX2X_PCI_ALLOC
5892#undef BNX2X_ALLOC
5893}
5894
5895static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5896{
5897 int i;
5898
5899 for_each_queue(bp, i) {
5900 struct bnx2x_fastpath *fp = &bp->fp[i];
5901
5902 u16 bd_cons = fp->tx_bd_cons;
5903 u16 sw_prod = fp->tx_pkt_prod;
5904 u16 sw_cons = fp->tx_pkt_cons;
5905
a2fbb9ea
ET
5906 while (sw_cons != sw_prod) {
5907 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5908 sw_cons++;
5909 }
5910 }
5911}
5912
5913static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5914{
5915 int i, j;
5916
5917 for_each_queue(bp, j) {
5918 struct bnx2x_fastpath *fp = &bp->fp[j];
5919
a2fbb9ea
ET
5920 for (i = 0; i < NUM_RX_BD; i++) {
5921 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5922 struct sk_buff *skb = rx_buf->skb;
5923
5924 if (skb == NULL)
5925 continue;
5926
5927 pci_unmap_single(bp->pdev,
5928 pci_unmap_addr(rx_buf, mapping),
5929 bp->rx_buf_use_size,
5930 PCI_DMA_FROMDEVICE);
5931
5932 rx_buf->skb = NULL;
5933 dev_kfree_skb(skb);
5934 }
7a9b2557 5935 if (!fp->disable_tpa)
32626230
EG
5936 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5937 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 5938 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
5939 }
5940}
5941
5942static void bnx2x_free_skbs(struct bnx2x *bp)
5943{
5944 bnx2x_free_tx_skbs(bp);
5945 bnx2x_free_rx_skbs(bp);
5946}
5947
5948static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5949{
34f80b04 5950 int i, offset = 1;
a2fbb9ea
ET
5951
5952 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 5953 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
5954 bp->msix_table[0].vector);
5955
5956 for_each_queue(bp, i) {
c14423fe 5957 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 5958 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
5959 bnx2x_fp(bp, i, state));
5960
228241eb
ET
5961 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5962 BNX2X_ERR("IRQ of fp #%d being freed while "
5963 "state != closed\n", i);
a2fbb9ea 5964
34f80b04 5965 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 5966 }
a2fbb9ea
ET
5967}
5968
5969static void bnx2x_free_irq(struct bnx2x *bp)
5970{
a2fbb9ea 5971 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
5972 bnx2x_free_msix_irqs(bp);
5973 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
5974 bp->flags &= ~USING_MSIX_FLAG;
5975
5976 } else
5977 free_irq(bp->pdev->irq, bp->dev);
5978}
5979
5980static int bnx2x_enable_msix(struct bnx2x *bp)
5981{
34f80b04 5982 int i, rc, offset;
a2fbb9ea
ET
5983
5984 bp->msix_table[0].entry = 0;
34f80b04
EG
5985 offset = 1;
5986 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 5987
34f80b04
EG
5988 for_each_queue(bp, i) {
5989 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 5990
34f80b04
EG
5991 bp->msix_table[i + offset].entry = igu_vec;
5992 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
5993 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
5994 }
5995
34f80b04
EG
5996 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
5997 bp->num_queues + offset);
5998 if (rc) {
5999 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6000 return -1;
6001 }
a2fbb9ea
ET
6002 bp->flags |= USING_MSIX_FLAG;
6003
6004 return 0;
a2fbb9ea
ET
6005}
6006
a2fbb9ea
ET
6007static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6008{
34f80b04 6009 int i, rc, offset = 1;
a2fbb9ea 6010
a2fbb9ea
ET
6011 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6012 bp->dev->name, bp->dev);
a2fbb9ea
ET
6013 if (rc) {
6014 BNX2X_ERR("request sp irq failed\n");
6015 return -EBUSY;
6016 }
6017
6018 for_each_queue(bp, i) {
34f80b04 6019 rc = request_irq(bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6020 bnx2x_msix_fp_int, 0,
6021 bp->dev->name, &bp->fp[i]);
a2fbb9ea 6022 if (rc) {
34f80b04
EG
6023 BNX2X_ERR("request fp #%d irq failed rc %d\n",
6024 i + offset, rc);
a2fbb9ea
ET
6025 bnx2x_free_msix_irqs(bp);
6026 return -EBUSY;
6027 }
6028
6029 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6030 }
6031
6032 return 0;
a2fbb9ea
ET
6033}
6034
6035static int bnx2x_req_irq(struct bnx2x *bp)
6036{
34f80b04 6037 int rc;
a2fbb9ea 6038
34f80b04
EG
6039 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6040 bp->dev->name, bp->dev);
a2fbb9ea
ET
6041 if (!rc)
6042 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6043
6044 return rc;
a2fbb9ea
ET
6045}
6046
6047/*
6048 * Init service functions
6049 */
6050
3101c2bc 6051static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
a2fbb9ea
ET
6052{
6053 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6054 int port = BP_PORT(bp);
a2fbb9ea
ET
6055
6056 /* CAM allocation
6057 * unicasts 0-31:port0 32-63:port1
6058 * multicast 64-127:port0 128-191:port1
6059 */
6060 config->hdr.length_6b = 2;
34f80b04
EG
6061 config->hdr.offset = port ? 31 : 0;
6062 config->hdr.client_id = BP_CL_ID(bp);
a2fbb9ea
ET
6063 config->hdr.reserved1 = 0;
6064
6065 /* primary MAC */
6066 config->config_table[0].cam_entry.msb_mac_addr =
6067 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6068 config->config_table[0].cam_entry.middle_mac_addr =
6069 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6070 config->config_table[0].cam_entry.lsb_mac_addr =
6071 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6072 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6073 if (set)
6074 config->config_table[0].target_table_entry.flags = 0;
6075 else
6076 CAM_INVALIDATE(config->config_table[0]);
a2fbb9ea
ET
6077 config->config_table[0].target_table_entry.client_id = 0;
6078 config->config_table[0].target_table_entry.vlan_id = 0;
6079
3101c2bc
YG
6080 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6081 (set ? "setting" : "clearing"),
a2fbb9ea
ET
6082 config->config_table[0].cam_entry.msb_mac_addr,
6083 config->config_table[0].cam_entry.middle_mac_addr,
6084 config->config_table[0].cam_entry.lsb_mac_addr);
6085
6086 /* broadcast */
6087 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6088 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6089 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6090 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6091 if (set)
6092 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6093 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
3101c2bc
YG
6094 else
6095 CAM_INVALIDATE(config->config_table[1]);
a2fbb9ea
ET
6096 config->config_table[1].target_table_entry.client_id = 0;
6097 config->config_table[1].target_table_entry.vlan_id = 0;
6098
6099 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6100 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6101 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6102}
6103
3101c2bc 6104static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
34f80b04
EG
6105{
6106 struct mac_configuration_cmd_e1h *config =
6107 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6108
3101c2bc 6109 if (set && (bp->state != BNX2X_STATE_OPEN)) {
34f80b04
EG
6110 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6111 return;
6112 }
6113
6114 /* CAM allocation for E1H
6115 * unicasts: by func number
6116 * multicast: 20+FUNC*20, 20 each
6117 */
6118 config->hdr.length_6b = 1;
6119 config->hdr.offset = BP_FUNC(bp);
6120 config->hdr.client_id = BP_CL_ID(bp);
6121 config->hdr.reserved1 = 0;
6122
6123 /* primary MAC */
6124 config->config_table[0].msb_mac_addr =
6125 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6126 config->config_table[0].middle_mac_addr =
6127 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6128 config->config_table[0].lsb_mac_addr =
6129 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6130 config->config_table[0].client_id = BP_L_ID(bp);
6131 config->config_table[0].vlan_id = 0;
6132 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
6133 if (set)
6134 config->config_table[0].flags = BP_PORT(bp);
6135 else
6136 config->config_table[0].flags =
6137 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6138
3101c2bc
YG
6139 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6140 (set ? "setting" : "clearing"),
34f80b04
EG
6141 config->config_table[0].msb_mac_addr,
6142 config->config_table[0].middle_mac_addr,
6143 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6144
6145 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6146 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6147 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6148}
6149
a2fbb9ea
ET
6150static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6151 int *state_p, int poll)
6152{
6153 /* can take a while if any port is running */
34f80b04 6154 int cnt = 500;
a2fbb9ea 6155
c14423fe
ET
6156 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6157 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6158
6159 might_sleep();
34f80b04 6160 while (cnt--) {
a2fbb9ea
ET
6161 if (poll) {
6162 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
6163 /* if index is different from 0
6164 * the reply for some commands will
3101c2bc 6165 * be on the non default queue
a2fbb9ea
ET
6166 */
6167 if (idx)
6168 bnx2x_rx_int(&bp->fp[idx], 10);
6169 }
a2fbb9ea 6170
3101c2bc 6171 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6172 if (*state_p == state)
a2fbb9ea
ET
6173 return 0;
6174
a2fbb9ea 6175 msleep(1);
a2fbb9ea
ET
6176 }
6177
a2fbb9ea 6178 /* timeout! */
49d66772
ET
6179 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6180 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6181#ifdef BNX2X_STOP_ON_ERROR
6182 bnx2x_panic();
6183#endif
a2fbb9ea 6184
49d66772 6185 return -EBUSY;
a2fbb9ea
ET
6186}
6187
6188static int bnx2x_setup_leading(struct bnx2x *bp)
6189{
34f80b04 6190 int rc;
a2fbb9ea 6191
c14423fe 6192 /* reset IGU state */
34f80b04 6193 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
6194
6195 /* SETUP ramrod */
6196 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6197
34f80b04
EG
6198 /* Wait for completion */
6199 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6200
34f80b04 6201 return rc;
a2fbb9ea
ET
6202}
6203
6204static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6205{
a2fbb9ea 6206 /* reset IGU state */
34f80b04 6207 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6208
228241eb 6209 /* SETUP ramrod */
a2fbb9ea
ET
6210 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6211 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6212
6213 /* Wait for completion */
6214 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 6215 &(bp->fp[index].state), 0);
a2fbb9ea
ET
6216}
6217
a2fbb9ea
ET
6218static int bnx2x_poll(struct napi_struct *napi, int budget);
6219static void bnx2x_set_rx_mode(struct net_device *dev);
6220
34f80b04
EG
6221/* must be called with rtnl_lock */
6222static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 6223{
228241eb 6224 u32 load_code;
34f80b04
EG
6225 int i, rc;
6226
6227#ifdef BNX2X_STOP_ON_ERROR
6228 if (unlikely(bp->panic))
6229 return -EPERM;
6230#endif
a2fbb9ea
ET
6231
6232 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6233
34f80b04
EG
6234 /* Send LOAD_REQUEST command to MCP
6235 Returns the type of LOAD command:
6236 if it is the first port to be initialized
6237 common blocks should be initialized, otherwise - not
a2fbb9ea 6238 */
34f80b04 6239 if (!BP_NOMCP(bp)) {
228241eb
ET
6240 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6241 if (!load_code) {
da5a662a 6242 BNX2X_ERR("MCP response failure, aborting\n");
228241eb
ET
6243 return -EBUSY;
6244 }
34f80b04 6245 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
a2fbb9ea 6246 return -EBUSY; /* other port in diagnostic mode */
34f80b04 6247
a2fbb9ea 6248 } else {
da5a662a
VZ
6249 int port = BP_PORT(bp);
6250
34f80b04
EG
6251 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6252 load_count[0], load_count[1], load_count[2]);
6253 load_count[0]++;
da5a662a 6254 load_count[1 + port]++;
34f80b04
EG
6255 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6256 load_count[0], load_count[1], load_count[2]);
6257 if (load_count[0] == 1)
6258 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
da5a662a 6259 else if (load_count[1 + port] == 1)
34f80b04
EG
6260 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6261 else
6262 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
a2fbb9ea
ET
6263 }
6264
34f80b04
EG
6265 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6266 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6267 bp->port.pmf = 1;
6268 else
6269 bp->port.pmf = 0;
6270 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6271
6272 /* if we can't use MSI-X we only need one fp,
6273 * so try to enable MSI-X with the requested number of fp's
a2fbb9ea
ET
6274 * and fallback to inta with one fp
6275 */
34f80b04
EG
6276 if (use_inta) {
6277 bp->num_queues = 1;
6278
6279 } else {
6280 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6281 /* user requested number */
6282 bp->num_queues = use_multi;
6283
6284 else if (use_multi)
6285 bp->num_queues = min_t(u32, num_online_cpus(),
6286 BP_MAX_QUEUES(bp));
6287 else
a2fbb9ea 6288 bp->num_queues = 1;
34f80b04
EG
6289
6290 if (bnx2x_enable_msix(bp)) {
6291 /* failed to enable MSI-X */
6292 bp->num_queues = 1;
6293 if (use_multi)
6294 BNX2X_ERR("Multi requested but failed"
6295 " to enable MSI-X\n");
a2fbb9ea
ET
6296 }
6297 }
34f80b04
EG
6298 DP(NETIF_MSG_IFUP,
6299 "set number of queues to %d\n", bp->num_queues);
c14423fe 6300
a2fbb9ea
ET
6301 if (bnx2x_alloc_mem(bp))
6302 return -ENOMEM;
6303
7a9b2557
VZ
6304 for_each_queue(bp, i)
6305 bnx2x_fp(bp, i, disable_tpa) =
6306 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6307
34f80b04
EG
6308 if (bp->flags & USING_MSIX_FLAG) {
6309 rc = bnx2x_req_msix_irqs(bp);
6310 if (rc) {
6311 pci_disable_msix(bp->pdev);
6312 goto load_error;
6313 }
6314 } else {
6315 bnx2x_ack_int(bp);
6316 rc = bnx2x_req_irq(bp);
6317 if (rc) {
6318 BNX2X_ERR("IRQ request failed, aborting\n");
6319 goto load_error;
a2fbb9ea
ET
6320 }
6321 }
6322
6323 for_each_queue(bp, i)
6324 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6325 bnx2x_poll, 128);
6326
a2fbb9ea 6327 /* Initialize HW */
34f80b04
EG
6328 rc = bnx2x_init_hw(bp, load_code);
6329 if (rc) {
a2fbb9ea 6330 BNX2X_ERR("HW init failed, aborting\n");
228241eb 6331 goto load_error;
a2fbb9ea
ET
6332 }
6333
a2fbb9ea 6334 /* Setup NIC internals and enable interrupts */
471de716 6335 bnx2x_nic_init(bp, load_code);
a2fbb9ea
ET
6336
6337 /* Send LOAD_DONE command to MCP */
34f80b04 6338 if (!BP_NOMCP(bp)) {
228241eb
ET
6339 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6340 if (!load_code) {
da5a662a 6341 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6342 rc = -EBUSY;
228241eb 6343 goto load_int_disable;
a2fbb9ea
ET
6344 }
6345 }
6346
bb2a0f7a
YG
6347 bnx2x_stats_init(bp);
6348
a2fbb9ea
ET
6349 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6350
6351 /* Enable Rx interrupt handling before sending the ramrod
6352 as it's completed on Rx FP queue */
6353 for_each_queue(bp, i)
6354 napi_enable(&bnx2x_fp(bp, i, napi));
6355
da5a662a
VZ
6356 /* Enable interrupt handling */
6357 atomic_set(&bp->intr_sem, 0);
6358
34f80b04
EG
6359 rc = bnx2x_setup_leading(bp);
6360 if (rc) {
da5a662a 6361 BNX2X_ERR("Setup leading failed!\n");
228241eb 6362 goto load_stop_netif;
34f80b04 6363 }
a2fbb9ea 6364
34f80b04
EG
6365 if (CHIP_IS_E1H(bp))
6366 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6367 BNX2X_ERR("!!! mf_cfg function disabled\n");
6368 bp->state = BNX2X_STATE_DISABLED;
6369 }
a2fbb9ea 6370
34f80b04
EG
6371 if (bp->state == BNX2X_STATE_OPEN)
6372 for_each_nondefault_queue(bp, i) {
6373 rc = bnx2x_setup_multi(bp, i);
6374 if (rc)
6375 goto load_stop_netif;
6376 }
a2fbb9ea 6377
34f80b04 6378 if (CHIP_IS_E1(bp))
3101c2bc 6379 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6380 else
3101c2bc 6381 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04
EG
6382
6383 if (bp->port.pmf)
6384 bnx2x_initial_phy_init(bp);
a2fbb9ea
ET
6385
6386 /* Start fast path */
34f80b04
EG
6387 switch (load_mode) {
6388 case LOAD_NORMAL:
6389 /* Tx queue should be only reenabled */
6390 netif_wake_queue(bp->dev);
6391 bnx2x_set_rx_mode(bp->dev);
6392 break;
6393
6394 case LOAD_OPEN:
a2fbb9ea 6395 netif_start_queue(bp->dev);
34f80b04 6396 bnx2x_set_rx_mode(bp->dev);
a2fbb9ea
ET
6397 if (bp->flags & USING_MSIX_FLAG)
6398 printk(KERN_INFO PFX "%s: using MSI-X\n",
6399 bp->dev->name);
34f80b04 6400 break;
a2fbb9ea 6401
34f80b04 6402 case LOAD_DIAG:
a2fbb9ea 6403 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
6404 bp->state = BNX2X_STATE_DIAG;
6405 break;
6406
6407 default:
6408 break;
a2fbb9ea
ET
6409 }
6410
34f80b04
EG
6411 if (!bp->port.pmf)
6412 bnx2x__link_status_update(bp);
6413
a2fbb9ea
ET
6414 /* start the timer */
6415 mod_timer(&bp->timer, jiffies + bp->current_interval);
6416
34f80b04 6417
a2fbb9ea
ET
6418 return 0;
6419
228241eb 6420load_stop_netif:
a2fbb9ea
ET
6421 for_each_queue(bp, i)
6422 napi_disable(&bnx2x_fp(bp, i, napi));
6423
228241eb 6424load_int_disable:
615f8fd9 6425 bnx2x_int_disable_sync(bp);
a2fbb9ea 6426
34f80b04 6427 /* Release IRQs */
a2fbb9ea
ET
6428 bnx2x_free_irq(bp);
6429
7a9b2557
VZ
6430 /* Free SKBs, SGEs, TPA pool and driver internals */
6431 bnx2x_free_skbs(bp);
6432 for_each_queue(bp, i)
6433 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6434 RX_SGE_CNT*NUM_RX_SGE_PAGES);
228241eb 6435load_error:
a2fbb9ea
ET
6436 bnx2x_free_mem(bp);
6437
6438 /* TBD we really need to reset the chip
6439 if we want to recover from this */
34f80b04 6440 return rc;
a2fbb9ea
ET
6441}
6442
6443static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6444{
a2fbb9ea
ET
6445 int rc;
6446
c14423fe 6447 /* halt the connection */
a2fbb9ea
ET
6448 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6449 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6450
34f80b04 6451 /* Wait for completion */
a2fbb9ea 6452 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 6453 &(bp->fp[index].state), 1);
c14423fe 6454 if (rc) /* timeout */
a2fbb9ea
ET
6455 return rc;
6456
6457 /* delete cfc entry */
6458 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6459
34f80b04
EG
6460 /* Wait for completion */
6461 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6462 &(bp->fp[index].state), 1);
6463 return rc;
a2fbb9ea
ET
6464}
6465
da5a662a 6466static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6467{
49d66772 6468 u16 dsb_sp_prod_idx;
c14423fe 6469 /* if the other port is handling traffic,
a2fbb9ea 6470 this can take a lot of time */
34f80b04
EG
6471 int cnt = 500;
6472 int rc;
a2fbb9ea
ET
6473
6474 might_sleep();
6475
6476 /* Send HALT ramrod */
6477 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6478 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6479
34f80b04
EG
6480 /* Wait for completion */
6481 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6482 &(bp->fp[0].state), 1);
6483 if (rc) /* timeout */
da5a662a 6484 return rc;
a2fbb9ea 6485
49d66772 6486 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6487
228241eb 6488 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
6489 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6490
49d66772 6491 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
6492 we are going to reset the chip anyway
6493 so there is not much to do if this times out
6494 */
34f80b04 6495 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
6496 if (!cnt) {
6497 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6498 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6499 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6500#ifdef BNX2X_STOP_ON_ERROR
6501 bnx2x_panic();
da5a662a
VZ
6502#else
6503 rc = -EBUSY;
34f80b04
EG
6504#endif
6505 break;
6506 }
6507 cnt--;
da5a662a 6508 msleep(1);
49d66772
ET
6509 }
6510 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6511 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
6512
6513 return rc;
a2fbb9ea
ET
6514}
6515
34f80b04
EG
6516static void bnx2x_reset_func(struct bnx2x *bp)
6517{
6518 int port = BP_PORT(bp);
6519 int func = BP_FUNC(bp);
6520 int base, i;
6521
6522 /* Configure IGU */
6523 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6524 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6525
6526 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6527
6528 /* Clear ILT */
6529 base = FUNC_ILT_BASE(func);
6530 for (i = base; i < base + ILT_PER_FUNC; i++)
6531 bnx2x_ilt_wr(bp, i, 0);
6532}
6533
6534static void bnx2x_reset_port(struct bnx2x *bp)
6535{
6536 int port = BP_PORT(bp);
6537 u32 val;
6538
6539 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6540
6541 /* Do not rcv packets to BRB */
6542 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6543 /* Do not direct rcv packets that are not for MCP to the BRB */
6544 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6545 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6546
6547 /* Configure AEU */
6548 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6549
6550 msleep(100);
6551 /* Check for BRB port occupancy */
6552 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6553 if (val)
6554 DP(NETIF_MSG_IFDOWN,
6555 "BRB1 is not empty %d blooks are occupied\n", val);
6556
6557 /* TODO: Close Doorbell port? */
6558}
6559
6560static void bnx2x_reset_common(struct bnx2x *bp)
6561{
6562 /* reset_common */
6563 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6564 0xd3ffff7f);
6565 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6566}
6567
6568static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6569{
6570 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6571 BP_FUNC(bp), reset_code);
6572
6573 switch (reset_code) {
6574 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6575 bnx2x_reset_port(bp);
6576 bnx2x_reset_func(bp);
6577 bnx2x_reset_common(bp);
6578 break;
6579
6580 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6581 bnx2x_reset_port(bp);
6582 bnx2x_reset_func(bp);
6583 break;
6584
6585 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6586 bnx2x_reset_func(bp);
6587 break;
49d66772 6588
34f80b04
EG
6589 default:
6590 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6591 break;
6592 }
6593}
6594
6595/* msut be called with rtnl_lock */
6596static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6597{
da5a662a 6598 int port = BP_PORT(bp);
a2fbb9ea 6599 u32 reset_code = 0;
da5a662a 6600 int i, cnt, rc;
a2fbb9ea
ET
6601
6602 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6603
228241eb
ET
6604 bp->rx_mode = BNX2X_RX_MODE_NONE;
6605 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6606
228241eb
ET
6607 if (netif_running(bp->dev)) {
6608 netif_tx_disable(bp->dev);
6609 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6610 }
6611
34f80b04
EG
6612 del_timer_sync(&bp->timer);
6613 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6614 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6615 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6616
da5a662a 6617 /* Wait until tx fast path tasks complete */
228241eb
ET
6618 for_each_queue(bp, i) {
6619 struct bnx2x_fastpath *fp = &bp->fp[i];
6620
34f80b04
EG
6621 cnt = 1000;
6622 smp_rmb();
da5a662a
VZ
6623 while (BNX2X_HAS_TX_WORK(fp)) {
6624
6625 if (!netif_running(bp->dev))
6626 bnx2x_tx_int(fp, 1000);
6627
34f80b04
EG
6628 if (!cnt) {
6629 BNX2X_ERR("timeout waiting for queue[%d]\n",
6630 i);
6631#ifdef BNX2X_STOP_ON_ERROR
6632 bnx2x_panic();
6633 return -EBUSY;
6634#else
6635 break;
6636#endif
6637 }
6638 cnt--;
da5a662a 6639 msleep(1);
34f80b04
EG
6640 smp_rmb();
6641 }
228241eb 6642 }
a2fbb9ea 6643
da5a662a
VZ
6644 /* Give HW time to discard old tx messages */
6645 msleep(1);
a2fbb9ea 6646
228241eb
ET
6647 for_each_queue(bp, i)
6648 napi_disable(&bnx2x_fp(bp, i, napi));
6649 /* Disable interrupts after Tx and Rx are disabled on stack level */
6650 bnx2x_int_disable_sync(bp);
a2fbb9ea 6651
34f80b04
EG
6652 /* Release IRQs */
6653 bnx2x_free_irq(bp);
6654
da5a662a
VZ
6655 if (unload_mode == UNLOAD_NORMAL)
6656 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6657
6658 else if (bp->flags & NO_WOL_FLAG) {
a2fbb9ea 6659 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
da5a662a
VZ
6660 if (CHIP_IS_E1H(bp))
6661 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
228241eb 6662
da5a662a
VZ
6663 } else if (bp->wol) {
6664 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
a2fbb9ea 6665 u8 *mac_addr = bp->dev->dev_addr;
34f80b04 6666 u32 val;
34f80b04
EG
6667 /* The mac address is written to entries 1-4 to
6668 preserve entry 0 which is used by the PMF */
da5a662a
VZ
6669 u8 entry = (BP_E1HVN(bp) + 1)*8;
6670
a2fbb9ea 6671 val = (mac_addr[0] << 8) | mac_addr[1];
da5a662a 6672 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry, val);
a2fbb9ea
ET
6673
6674 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6675 (mac_addr[4] << 8) | mac_addr[5];
da5a662a 6676 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
a2fbb9ea
ET
6677
6678 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
228241eb 6679
a2fbb9ea
ET
6680 } else
6681 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6682
3101c2bc
YG
6683 if (CHIP_IS_E1(bp)) {
6684 struct mac_configuration_cmd *config =
6685 bnx2x_sp(bp, mcast_config);
6686
6687 bnx2x_set_mac_addr_e1(bp, 0);
6688
6689 for (i = 0; i < config->hdr.length_6b; i++)
6690 CAM_INVALIDATE(config->config_table[i]);
6691
6692 config->hdr.length_6b = i;
6693 if (CHIP_REV_IS_SLOW(bp))
6694 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6695 else
6696 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6697 config->hdr.client_id = BP_CL_ID(bp);
6698 config->hdr.reserved1 = 0;
6699
6700 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6701 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6702 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6703
6704 } else { /* E1H */
6705 bnx2x_set_mac_addr_e1h(bp, 0);
6706
6707 for (i = 0; i < MC_HASH_SIZE; i++)
6708 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6709 }
6710
da5a662a
VZ
6711 if (CHIP_IS_E1H(bp))
6712 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6713
34f80b04
EG
6714 /* Close multi and leading connections
6715 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
6716 for_each_nondefault_queue(bp, i)
6717 if (bnx2x_stop_multi(bp, i))
228241eb 6718 goto unload_error;
a2fbb9ea 6719
da5a662a
VZ
6720 rc = bnx2x_stop_leading(bp);
6721 if (rc) {
34f80b04 6722 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6723#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6724 return -EBUSY;
da5a662a
VZ
6725#else
6726 goto unload_error;
34f80b04 6727#endif
228241eb
ET
6728 }
6729
6730unload_error:
34f80b04 6731 if (!BP_NOMCP(bp))
228241eb 6732 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6733 else {
6734 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6735 load_count[0], load_count[1], load_count[2]);
6736 load_count[0]--;
da5a662a 6737 load_count[1 + port]--;
34f80b04
EG
6738 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6739 load_count[0], load_count[1], load_count[2]);
6740 if (load_count[0] == 0)
6741 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6742 else if (load_count[1 + port] == 0)
34f80b04
EG
6743 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6744 else
6745 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6746 }
a2fbb9ea 6747
34f80b04
EG
6748 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6749 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6750 bnx2x__link_reset(bp);
a2fbb9ea
ET
6751
6752 /* Reset the chip */
228241eb 6753 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
6754
6755 /* Report UNLOAD_DONE to MCP */
34f80b04 6756 if (!BP_NOMCP(bp))
a2fbb9ea
ET
6757 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6758
7a9b2557 6759 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6760 bnx2x_free_skbs(bp);
7a9b2557
VZ
6761 for_each_queue(bp, i)
6762 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6763 RX_SGE_CNT*NUM_RX_SGE_PAGES);
a2fbb9ea
ET
6764 bnx2x_free_mem(bp);
6765
6766 bp->state = BNX2X_STATE_CLOSED;
228241eb 6767
a2fbb9ea
ET
6768 netif_carrier_off(bp->dev);
6769
6770 return 0;
6771}
6772
34f80b04
EG
6773static void bnx2x_reset_task(struct work_struct *work)
6774{
6775 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6776
6777#ifdef BNX2X_STOP_ON_ERROR
6778 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6779 " so reset not done to allow debug dump,\n"
6780 KERN_ERR " you will need to reboot when done\n");
6781 return;
6782#endif
6783
6784 rtnl_lock();
6785
6786 if (!netif_running(bp->dev))
6787 goto reset_task_exit;
6788
6789 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6790 bnx2x_nic_load(bp, LOAD_NORMAL);
6791
6792reset_task_exit:
6793 rtnl_unlock();
6794}
6795
a2fbb9ea
ET
6796/* end of nic load/unload */
6797
6798/* ethtool_ops */
6799
6800/*
6801 * Init service functions
6802 */
6803
34f80b04
EG
6804static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6805{
6806 u32 val;
6807
6808 /* Check if there is any driver already loaded */
6809 val = REG_RD(bp, MISC_REG_UNPREPARED);
6810 if (val == 0x1) {
6811 /* Check if it is the UNDI driver
6812 * UNDI driver initializes CID offset for normal bell to 0x7
6813 */
4a37fb66 6814 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
6815 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6816 if (val == 0x7) {
6817 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6818 /* save our func */
34f80b04 6819 int func = BP_FUNC(bp);
da5a662a
VZ
6820 u32 swap_en;
6821 u32 swap_val;
34f80b04
EG
6822
6823 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6824
6825 /* try unload UNDI on port 0 */
6826 bp->func = 0;
da5a662a
VZ
6827 bp->fw_seq =
6828 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6829 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6830 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6831
6832 /* if UNDI is loaded on the other port */
6833 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6834
da5a662a
VZ
6835 /* send "DONE" for previous unload */
6836 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6837
6838 /* unload UNDI on port 1 */
34f80b04 6839 bp->func = 1;
da5a662a
VZ
6840 bp->fw_seq =
6841 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6842 DRV_MSG_SEQ_NUMBER_MASK);
6843 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6844
6845 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6846 }
6847
da5a662a
VZ
6848 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6849 HC_REG_CONFIG_0), 0x1000);
6850
6851 /* close input traffic and wait for it */
6852 /* Do not rcv packets to BRB */
6853 REG_WR(bp,
6854 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6855 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6856 /* Do not direct rcv packets that are not for MCP to
6857 * the BRB */
6858 REG_WR(bp,
6859 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6860 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6861 /* clear AEU */
6862 REG_WR(bp,
6863 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6864 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6865 msleep(10);
6866
6867 /* save NIG port swap info */
6868 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6869 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
6870 /* reset device */
6871 REG_WR(bp,
6872 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 6873 0xd3ffffff);
34f80b04
EG
6874 REG_WR(bp,
6875 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6876 0x1403);
da5a662a
VZ
6877 /* take the NIG out of reset and restore swap values */
6878 REG_WR(bp,
6879 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6880 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6881 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6882 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6883
6884 /* send unload done to the MCP */
6885 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6886
6887 /* restore our func and fw_seq */
6888 bp->func = func;
6889 bp->fw_seq =
6890 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6891 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6892 }
4a37fb66 6893 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
6894 }
6895}
6896
6897static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6898{
6899 u32 val, val2, val3, val4, id;
72ce58c3 6900 u16 pmc;
34f80b04
EG
6901
6902 /* Get the chip revision id and number. */
6903 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6904 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6905 id = ((val & 0xffff) << 16);
6906 val = REG_RD(bp, MISC_REG_CHIP_REV);
6907 id |= ((val & 0xf) << 12);
6908 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6909 id |= ((val & 0xff) << 4);
6910 REG_RD(bp, MISC_REG_BOND_ID);
6911 id |= (val & 0xf);
6912 bp->common.chip_id = id;
6913 bp->link_params.chip_id = bp->common.chip_id;
6914 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6915
6916 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6917 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6918 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6919 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6920 bp->common.flash_size, bp->common.flash_size);
6921
6922 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6923 bp->link_params.shmem_base = bp->common.shmem_base;
6924 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6925
6926 if (!bp->common.shmem_base ||
6927 (bp->common.shmem_base < 0xA0000) ||
6928 (bp->common.shmem_base >= 0xC0000)) {
6929 BNX2X_DEV_INFO("MCP not active\n");
6930 bp->flags |= NO_MCP_FLAG;
6931 return;
6932 }
6933
6934 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6935 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6936 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6937 BNX2X_ERR("BAD MCP validity signature\n");
6938
6939 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6940 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6941
6942 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6943 bp->common.hw_config, bp->common.board);
6944
6945 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6946 SHARED_HW_CFG_LED_MODE_MASK) >>
6947 SHARED_HW_CFG_LED_MODE_SHIFT);
6948
6949 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6950 bp->common.bc_ver = val;
6951 BNX2X_DEV_INFO("bc_ver %X\n", val);
6952 if (val < BNX2X_BC_VER) {
6953 /* for now only warn
6954 * later we might need to enforce this */
6955 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6956 " please upgrade BC\n", BNX2X_BC_VER, val);
6957 }
72ce58c3
EG
6958
6959 if (BP_E1HVN(bp) == 0) {
6960 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6961 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6962 } else {
6963 /* no WOL capability for E1HVN != 0 */
6964 bp->flags |= NO_WOL_FLAG;
6965 }
6966 BNX2X_DEV_INFO("%sWoL capable\n",
6967 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
34f80b04
EG
6968
6969 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6970 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6971 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6972 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6973
6974 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6975 val, val2, val3, val4);
6976}
6977
6978static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6979 u32 switch_cfg)
a2fbb9ea 6980{
34f80b04 6981 int port = BP_PORT(bp);
a2fbb9ea
ET
6982 u32 ext_phy_type;
6983
a2fbb9ea
ET
6984 switch (switch_cfg) {
6985 case SWITCH_CFG_1G:
6986 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6987
c18487ee
YR
6988 ext_phy_type =
6989 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6990 switch (ext_phy_type) {
6991 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6992 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6993 ext_phy_type);
6994
34f80b04
EG
6995 bp->port.supported |= (SUPPORTED_10baseT_Half |
6996 SUPPORTED_10baseT_Full |
6997 SUPPORTED_100baseT_Half |
6998 SUPPORTED_100baseT_Full |
6999 SUPPORTED_1000baseT_Full |
7000 SUPPORTED_2500baseX_Full |
7001 SUPPORTED_TP |
7002 SUPPORTED_FIBRE |
7003 SUPPORTED_Autoneg |
7004 SUPPORTED_Pause |
7005 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7006 break;
7007
7008 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7009 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7010 ext_phy_type);
7011
34f80b04
EG
7012 bp->port.supported |= (SUPPORTED_10baseT_Half |
7013 SUPPORTED_10baseT_Full |
7014 SUPPORTED_100baseT_Half |
7015 SUPPORTED_100baseT_Full |
7016 SUPPORTED_1000baseT_Full |
7017 SUPPORTED_TP |
7018 SUPPORTED_FIBRE |
7019 SUPPORTED_Autoneg |
7020 SUPPORTED_Pause |
7021 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7022 break;
7023
7024 default:
7025 BNX2X_ERR("NVRAM config error. "
7026 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7027 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7028 return;
7029 }
7030
34f80b04
EG
7031 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7032 port*0x10);
7033 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7034 break;
7035
7036 case SWITCH_CFG_10G:
7037 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7038
c18487ee
YR
7039 ext_phy_type =
7040 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7041 switch (ext_phy_type) {
7042 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7043 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7044 ext_phy_type);
7045
34f80b04
EG
7046 bp->port.supported |= (SUPPORTED_10baseT_Half |
7047 SUPPORTED_10baseT_Full |
7048 SUPPORTED_100baseT_Half |
7049 SUPPORTED_100baseT_Full |
7050 SUPPORTED_1000baseT_Full |
7051 SUPPORTED_2500baseX_Full |
7052 SUPPORTED_10000baseT_Full |
7053 SUPPORTED_TP |
7054 SUPPORTED_FIBRE |
7055 SUPPORTED_Autoneg |
7056 SUPPORTED_Pause |
7057 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7058 break;
7059
7060 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7061 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7062 ext_phy_type);
f1410647 7063
34f80b04
EG
7064 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7065 SUPPORTED_FIBRE |
7066 SUPPORTED_Pause |
7067 SUPPORTED_Asym_Pause);
f1410647
ET
7068 break;
7069
a2fbb9ea 7070 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7071 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7072 ext_phy_type);
7073
34f80b04
EG
7074 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7075 SUPPORTED_1000baseT_Full |
7076 SUPPORTED_FIBRE |
7077 SUPPORTED_Pause |
7078 SUPPORTED_Asym_Pause);
f1410647
ET
7079 break;
7080
7081 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7082 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7083 ext_phy_type);
7084
34f80b04
EG
7085 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7086 SUPPORTED_1000baseT_Full |
7087 SUPPORTED_FIBRE |
7088 SUPPORTED_Autoneg |
7089 SUPPORTED_Pause |
7090 SUPPORTED_Asym_Pause);
f1410647
ET
7091 break;
7092
c18487ee
YR
7093 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7094 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7095 ext_phy_type);
7096
34f80b04
EG
7097 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7098 SUPPORTED_2500baseX_Full |
7099 SUPPORTED_1000baseT_Full |
7100 SUPPORTED_FIBRE |
7101 SUPPORTED_Autoneg |
7102 SUPPORTED_Pause |
7103 SUPPORTED_Asym_Pause);
c18487ee
YR
7104 break;
7105
f1410647
ET
7106 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7107 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7108 ext_phy_type);
7109
34f80b04
EG
7110 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7111 SUPPORTED_TP |
7112 SUPPORTED_Autoneg |
7113 SUPPORTED_Pause |
7114 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7115 break;
7116
c18487ee
YR
7117 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7118 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7119 bp->link_params.ext_phy_config);
7120 break;
7121
a2fbb9ea
ET
7122 default:
7123 BNX2X_ERR("NVRAM config error. "
7124 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7125 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7126 return;
7127 }
7128
34f80b04
EG
7129 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7130 port*0x18);
7131 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7132
a2fbb9ea
ET
7133 break;
7134
7135 default:
7136 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7137 bp->port.link_config);
a2fbb9ea
ET
7138 return;
7139 }
34f80b04 7140 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7141
7142 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7143 if (!(bp->link_params.speed_cap_mask &
7144 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7145 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7146
c18487ee
YR
7147 if (!(bp->link_params.speed_cap_mask &
7148 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7149 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7150
c18487ee
YR
7151 if (!(bp->link_params.speed_cap_mask &
7152 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7153 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7154
c18487ee
YR
7155 if (!(bp->link_params.speed_cap_mask &
7156 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7157 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7158
c18487ee
YR
7159 if (!(bp->link_params.speed_cap_mask &
7160 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7161 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7162 SUPPORTED_1000baseT_Full);
a2fbb9ea 7163
c18487ee
YR
7164 if (!(bp->link_params.speed_cap_mask &
7165 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7166 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7167
c18487ee
YR
7168 if (!(bp->link_params.speed_cap_mask &
7169 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7170 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7171
34f80b04 7172 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7173}
7174
34f80b04 7175static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7176{
c18487ee 7177 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7178
34f80b04 7179 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7180 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7181 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7182 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7183 bp->port.advertising = bp->port.supported;
a2fbb9ea 7184 } else {
c18487ee
YR
7185 u32 ext_phy_type =
7186 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7187
7188 if ((ext_phy_type ==
7189 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7190 (ext_phy_type ==
7191 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7192 /* force 10G, no AN */
c18487ee 7193 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7194 bp->port.advertising =
a2fbb9ea
ET
7195 (ADVERTISED_10000baseT_Full |
7196 ADVERTISED_FIBRE);
7197 break;
7198 }
7199 BNX2X_ERR("NVRAM config error. "
7200 "Invalid link_config 0x%x"
7201 " Autoneg not supported\n",
34f80b04 7202 bp->port.link_config);
a2fbb9ea
ET
7203 return;
7204 }
7205 break;
7206
7207 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7208 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7209 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7210 bp->port.advertising = (ADVERTISED_10baseT_Full |
7211 ADVERTISED_TP);
a2fbb9ea
ET
7212 } else {
7213 BNX2X_ERR("NVRAM config error. "
7214 "Invalid link_config 0x%x"
7215 " speed_cap_mask 0x%x\n",
34f80b04 7216 bp->port.link_config,
c18487ee 7217 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7218 return;
7219 }
7220 break;
7221
7222 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7223 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7224 bp->link_params.req_line_speed = SPEED_10;
7225 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7226 bp->port.advertising = (ADVERTISED_10baseT_Half |
7227 ADVERTISED_TP);
a2fbb9ea
ET
7228 } else {
7229 BNX2X_ERR("NVRAM config error. "
7230 "Invalid link_config 0x%x"
7231 " speed_cap_mask 0x%x\n",
34f80b04 7232 bp->port.link_config,
c18487ee 7233 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7234 return;
7235 }
7236 break;
7237
7238 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7239 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7240 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7241 bp->port.advertising = (ADVERTISED_100baseT_Full |
7242 ADVERTISED_TP);
a2fbb9ea
ET
7243 } else {
7244 BNX2X_ERR("NVRAM config error. "
7245 "Invalid link_config 0x%x"
7246 " speed_cap_mask 0x%x\n",
34f80b04 7247 bp->port.link_config,
c18487ee 7248 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7249 return;
7250 }
7251 break;
7252
7253 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7254 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7255 bp->link_params.req_line_speed = SPEED_100;
7256 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7257 bp->port.advertising = (ADVERTISED_100baseT_Half |
7258 ADVERTISED_TP);
a2fbb9ea
ET
7259 } else {
7260 BNX2X_ERR("NVRAM config error. "
7261 "Invalid link_config 0x%x"
7262 " speed_cap_mask 0x%x\n",
34f80b04 7263 bp->port.link_config,
c18487ee 7264 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7265 return;
7266 }
7267 break;
7268
7269 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7270 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7271 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7272 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7273 ADVERTISED_TP);
a2fbb9ea
ET
7274 } else {
7275 BNX2X_ERR("NVRAM config error. "
7276 "Invalid link_config 0x%x"
7277 " speed_cap_mask 0x%x\n",
34f80b04 7278 bp->port.link_config,
c18487ee 7279 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7280 return;
7281 }
7282 break;
7283
7284 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7285 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7286 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7287 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7288 ADVERTISED_TP);
a2fbb9ea
ET
7289 } else {
7290 BNX2X_ERR("NVRAM config error. "
7291 "Invalid link_config 0x%x"
7292 " speed_cap_mask 0x%x\n",
34f80b04 7293 bp->port.link_config,
c18487ee 7294 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7295 return;
7296 }
7297 break;
7298
7299 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7300 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7301 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7302 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7303 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7304 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7305 ADVERTISED_FIBRE);
a2fbb9ea
ET
7306 } else {
7307 BNX2X_ERR("NVRAM config error. "
7308 "Invalid link_config 0x%x"
7309 " speed_cap_mask 0x%x\n",
34f80b04 7310 bp->port.link_config,
c18487ee 7311 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7312 return;
7313 }
7314 break;
7315
7316 default:
7317 BNX2X_ERR("NVRAM config error. "
7318 "BAD link speed link_config 0x%x\n",
34f80b04 7319 bp->port.link_config);
c18487ee 7320 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7321 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7322 break;
7323 }
a2fbb9ea 7324
34f80b04
EG
7325 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7326 PORT_FEATURE_FLOW_CONTROL_MASK);
c18487ee 7327 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
4ab84d45 7328 !(bp->port.supported & SUPPORTED_Autoneg))
c18487ee 7329 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 7330
c18487ee 7331 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7332 " advertising 0x%x\n",
c18487ee
YR
7333 bp->link_params.req_line_speed,
7334 bp->link_params.req_duplex,
34f80b04 7335 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7336}
7337
34f80b04 7338static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7339{
34f80b04
EG
7340 int port = BP_PORT(bp);
7341 u32 val, val2;
a2fbb9ea 7342
c18487ee 7343 bp->link_params.bp = bp;
34f80b04 7344 bp->link_params.port = port;
c18487ee 7345
c18487ee 7346 bp->link_params.serdes_config =
f1410647 7347 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7348 bp->link_params.lane_config =
a2fbb9ea 7349 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7350 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7351 SHMEM_RD(bp,
7352 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7353 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7354 SHMEM_RD(bp,
7355 dev_info.port_hw_config[port].speed_capability_mask);
7356
34f80b04 7357 bp->port.link_config =
a2fbb9ea
ET
7358 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7359
34f80b04
EG
7360 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7361 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7362 " link_config 0x%08x\n",
c18487ee
YR
7363 bp->link_params.serdes_config,
7364 bp->link_params.lane_config,
7365 bp->link_params.ext_phy_config,
34f80b04 7366 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7367
34f80b04 7368 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7369 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7370 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7371
7372 bnx2x_link_settings_requested(bp);
7373
7374 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7375 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7376 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7377 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7378 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7379 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7380 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7381 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
7382 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7383 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7384}
7385
7386static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7387{
7388 int func = BP_FUNC(bp);
7389 u32 val, val2;
7390 int rc = 0;
a2fbb9ea 7391
34f80b04 7392 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7393
34f80b04
EG
7394 bp->e1hov = 0;
7395 bp->e1hmf = 0;
7396 if (CHIP_IS_E1H(bp)) {
7397 bp->mf_config =
7398 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7399
34f80b04
EG
7400 val =
7401 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7402 FUNC_MF_CFG_E1HOV_TAG_MASK);
7403 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7404
34f80b04
EG
7405 bp->e1hov = val;
7406 bp->e1hmf = 1;
7407 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7408 "(0x%04x)\n",
7409 func, bp->e1hov, bp->e1hov);
7410 } else {
7411 BNX2X_DEV_INFO("Single function mode\n");
7412 if (BP_E1HVN(bp)) {
7413 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7414 " aborting\n", func);
7415 rc = -EPERM;
7416 }
7417 }
7418 }
a2fbb9ea 7419
34f80b04
EG
7420 if (!BP_NOMCP(bp)) {
7421 bnx2x_get_port_hwinfo(bp);
7422
7423 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7424 DRV_MSG_SEQ_NUMBER_MASK);
7425 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7426 }
7427
7428 if (IS_E1HMF(bp)) {
7429 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7430 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7431 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7432 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7433 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7434 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7435 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7436 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7437 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7438 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7439 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7440 ETH_ALEN);
7441 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7442 ETH_ALEN);
a2fbb9ea 7443 }
34f80b04
EG
7444
7445 return rc;
a2fbb9ea
ET
7446 }
7447
34f80b04
EG
7448 if (BP_NOMCP(bp)) {
7449 /* only supposed to happen on emulation/FPGA */
7450 BNX2X_ERR("warning rendom MAC workaround active\n");
7451 random_ether_addr(bp->dev->dev_addr);
7452 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7453 }
a2fbb9ea 7454
34f80b04
EG
7455 return rc;
7456}
7457
7458static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7459{
7460 int func = BP_FUNC(bp);
7461 int rc;
7462
da5a662a
VZ
7463 /* Disable interrupt handling until HW is initialized */
7464 atomic_set(&bp->intr_sem, 1);
7465
34f80b04 7466 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7467
34f80b04
EG
7468 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7469 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7470
7471 rc = bnx2x_get_hwinfo(bp);
7472
7473 /* need to reset chip if undi was active */
7474 if (!BP_NOMCP(bp))
7475 bnx2x_undi_unload(bp);
7476
7477 if (CHIP_REV_IS_FPGA(bp))
7478 printk(KERN_ERR PFX "FPGA detected\n");
7479
7480 if (BP_NOMCP(bp) && (func == 0))
7481 printk(KERN_ERR PFX
7482 "MCP disabled, must load devices in order!\n");
7483
7a9b2557
VZ
7484 /* Set TPA flags */
7485 if (disable_tpa) {
7486 bp->flags &= ~TPA_ENABLE_FLAG;
7487 bp->dev->features &= ~NETIF_F_LRO;
7488 } else {
7489 bp->flags |= TPA_ENABLE_FLAG;
7490 bp->dev->features |= NETIF_F_LRO;
7491 }
7492
7493
34f80b04
EG
7494 bp->tx_ring_size = MAX_TX_AVAIL;
7495 bp->rx_ring_size = MAX_RX_AVAIL;
7496
7497 bp->rx_csum = 1;
7498 bp->rx_offset = 0;
7499
7500 bp->tx_ticks = 50;
7501 bp->rx_ticks = 25;
7502
34f80b04
EG
7503 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7504 bp->current_interval = (poll ? poll : bp->timer_interval);
7505
7506 init_timer(&bp->timer);
7507 bp->timer.expires = jiffies + bp->current_interval;
7508 bp->timer.data = (unsigned long) bp;
7509 bp->timer.function = bnx2x_timer;
7510
7511 return rc;
a2fbb9ea
ET
7512}
7513
7514/*
7515 * ethtool service functions
7516 */
7517
7518/* All ethtool functions called with rtnl_lock */
7519
7520static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7521{
7522 struct bnx2x *bp = netdev_priv(dev);
7523
34f80b04
EG
7524 cmd->supported = bp->port.supported;
7525 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7526
7527 if (netif_carrier_ok(dev)) {
c18487ee
YR
7528 cmd->speed = bp->link_vars.line_speed;
7529 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7530 } else {
c18487ee
YR
7531 cmd->speed = bp->link_params.req_line_speed;
7532 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7533 }
34f80b04
EG
7534 if (IS_E1HMF(bp)) {
7535 u16 vn_max_rate;
7536
7537 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7538 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7539 if (vn_max_rate < cmd->speed)
7540 cmd->speed = vn_max_rate;
7541 }
a2fbb9ea 7542
c18487ee
YR
7543 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7544 u32 ext_phy_type =
7545 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
7546
7547 switch (ext_phy_type) {
7548 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7549 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7550 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7551 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7552 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
7553 cmd->port = PORT_FIBRE;
7554 break;
7555
7556 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7557 cmd->port = PORT_TP;
7558 break;
7559
c18487ee
YR
7560 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7561 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7562 bp->link_params.ext_phy_config);
7563 break;
7564
f1410647
ET
7565 default:
7566 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
7567 bp->link_params.ext_phy_config);
7568 break;
f1410647
ET
7569 }
7570 } else
a2fbb9ea 7571 cmd->port = PORT_TP;
a2fbb9ea 7572
34f80b04 7573 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
7574 cmd->transceiver = XCVR_INTERNAL;
7575
c18487ee 7576 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7577 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7578 else
a2fbb9ea 7579 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7580
7581 cmd->maxtxpkt = 0;
7582 cmd->maxrxpkt = 0;
7583
7584 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7585 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7586 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7587 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7588 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7589 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7590 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7591
7592 return 0;
7593}
7594
7595static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7596{
7597 struct bnx2x *bp = netdev_priv(dev);
7598 u32 advertising;
7599
34f80b04
EG
7600 if (IS_E1HMF(bp))
7601 return 0;
7602
a2fbb9ea
ET
7603 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7604 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7605 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7606 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7607 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7608 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7609 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7610
a2fbb9ea 7611 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
7612 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7613 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7614 return -EINVAL;
f1410647 7615 }
a2fbb9ea
ET
7616
7617 /* advertise the requested speed and duplex if supported */
34f80b04 7618 cmd->advertising &= bp->port.supported;
a2fbb9ea 7619
c18487ee
YR
7620 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7621 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
7622 bp->port.advertising |= (ADVERTISED_Autoneg |
7623 cmd->advertising);
a2fbb9ea
ET
7624
7625 } else { /* forced speed */
7626 /* advertise the requested speed and duplex if supported */
7627 switch (cmd->speed) {
7628 case SPEED_10:
7629 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7630 if (!(bp->port.supported &
f1410647
ET
7631 SUPPORTED_10baseT_Full)) {
7632 DP(NETIF_MSG_LINK,
7633 "10M full not supported\n");
a2fbb9ea 7634 return -EINVAL;
f1410647 7635 }
a2fbb9ea
ET
7636
7637 advertising = (ADVERTISED_10baseT_Full |
7638 ADVERTISED_TP);
7639 } else {
34f80b04 7640 if (!(bp->port.supported &
f1410647
ET
7641 SUPPORTED_10baseT_Half)) {
7642 DP(NETIF_MSG_LINK,
7643 "10M half not supported\n");
a2fbb9ea 7644 return -EINVAL;
f1410647 7645 }
a2fbb9ea
ET
7646
7647 advertising = (ADVERTISED_10baseT_Half |
7648 ADVERTISED_TP);
7649 }
7650 break;
7651
7652 case SPEED_100:
7653 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7654 if (!(bp->port.supported &
f1410647
ET
7655 SUPPORTED_100baseT_Full)) {
7656 DP(NETIF_MSG_LINK,
7657 "100M full not supported\n");
a2fbb9ea 7658 return -EINVAL;
f1410647 7659 }
a2fbb9ea
ET
7660
7661 advertising = (ADVERTISED_100baseT_Full |
7662 ADVERTISED_TP);
7663 } else {
34f80b04 7664 if (!(bp->port.supported &
f1410647
ET
7665 SUPPORTED_100baseT_Half)) {
7666 DP(NETIF_MSG_LINK,
7667 "100M half not supported\n");
a2fbb9ea 7668 return -EINVAL;
f1410647 7669 }
a2fbb9ea
ET
7670
7671 advertising = (ADVERTISED_100baseT_Half |
7672 ADVERTISED_TP);
7673 }
7674 break;
7675
7676 case SPEED_1000:
f1410647
ET
7677 if (cmd->duplex != DUPLEX_FULL) {
7678 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7679 return -EINVAL;
f1410647 7680 }
a2fbb9ea 7681
34f80b04 7682 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7683 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7684 return -EINVAL;
f1410647 7685 }
a2fbb9ea
ET
7686
7687 advertising = (ADVERTISED_1000baseT_Full |
7688 ADVERTISED_TP);
7689 break;
7690
7691 case SPEED_2500:
f1410647
ET
7692 if (cmd->duplex != DUPLEX_FULL) {
7693 DP(NETIF_MSG_LINK,
7694 "2.5G half not supported\n");
a2fbb9ea 7695 return -EINVAL;
f1410647 7696 }
a2fbb9ea 7697
34f80b04 7698 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
7699 DP(NETIF_MSG_LINK,
7700 "2.5G full not supported\n");
a2fbb9ea 7701 return -EINVAL;
f1410647 7702 }
a2fbb9ea 7703
f1410647 7704 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7705 ADVERTISED_TP);
7706 break;
7707
7708 case SPEED_10000:
f1410647
ET
7709 if (cmd->duplex != DUPLEX_FULL) {
7710 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7711 return -EINVAL;
f1410647 7712 }
a2fbb9ea 7713
34f80b04 7714 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7715 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7716 return -EINVAL;
f1410647 7717 }
a2fbb9ea
ET
7718
7719 advertising = (ADVERTISED_10000baseT_Full |
7720 ADVERTISED_FIBRE);
7721 break;
7722
7723 default:
f1410647 7724 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
7725 return -EINVAL;
7726 }
7727
c18487ee
YR
7728 bp->link_params.req_line_speed = cmd->speed;
7729 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7730 bp->port.advertising = advertising;
a2fbb9ea
ET
7731 }
7732
c18487ee 7733 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7734 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7735 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7736 bp->port.advertising);
a2fbb9ea 7737
34f80b04 7738 if (netif_running(dev)) {
bb2a0f7a 7739 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7740 bnx2x_link_set(bp);
7741 }
a2fbb9ea
ET
7742
7743 return 0;
7744}
7745
c18487ee
YR
7746#define PHY_FW_VER_LEN 10
7747
a2fbb9ea
ET
7748static void bnx2x_get_drvinfo(struct net_device *dev,
7749 struct ethtool_drvinfo *info)
7750{
7751 struct bnx2x *bp = netdev_priv(dev);
c18487ee 7752 char phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
7753
7754 strcpy(info->driver, DRV_MODULE_NAME);
7755 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
7756
7757 phy_fw_ver[0] = '\0';
34f80b04 7758 if (bp->port.pmf) {
4a37fb66 7759 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
7760 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7761 (bp->state != BNX2X_STATE_CLOSED),
7762 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 7763 bnx2x_release_phy_lock(bp);
34f80b04 7764 }
c18487ee
YR
7765
7766 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
a2fbb9ea 7767 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
c18487ee 7768 BCM_5710_FW_REVISION_VERSION,
34f80b04 7769 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
c18487ee 7770 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
a2fbb9ea
ET
7771 strcpy(info->bus_info, pci_name(bp->pdev));
7772 info->n_stats = BNX2X_NUM_STATS;
7773 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7774 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
7775 info->regdump_len = 0;
7776}
7777
7778static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7779{
7780 struct bnx2x *bp = netdev_priv(dev);
7781
7782 if (bp->flags & NO_WOL_FLAG) {
7783 wol->supported = 0;
7784 wol->wolopts = 0;
7785 } else {
7786 wol->supported = WAKE_MAGIC;
7787 if (bp->wol)
7788 wol->wolopts = WAKE_MAGIC;
7789 else
7790 wol->wolopts = 0;
7791 }
7792 memset(&wol->sopass, 0, sizeof(wol->sopass));
7793}
7794
7795static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7796{
7797 struct bnx2x *bp = netdev_priv(dev);
7798
7799 if (wol->wolopts & ~WAKE_MAGIC)
7800 return -EINVAL;
7801
7802 if (wol->wolopts & WAKE_MAGIC) {
7803 if (bp->flags & NO_WOL_FLAG)
7804 return -EINVAL;
7805
7806 bp->wol = 1;
34f80b04 7807 } else
a2fbb9ea 7808 bp->wol = 0;
34f80b04 7809
a2fbb9ea
ET
7810 return 0;
7811}
7812
7813static u32 bnx2x_get_msglevel(struct net_device *dev)
7814{
7815 struct bnx2x *bp = netdev_priv(dev);
7816
7817 return bp->msglevel;
7818}
7819
7820static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7821{
7822 struct bnx2x *bp = netdev_priv(dev);
7823
7824 if (capable(CAP_NET_ADMIN))
7825 bp->msglevel = level;
7826}
7827
7828static int bnx2x_nway_reset(struct net_device *dev)
7829{
7830 struct bnx2x *bp = netdev_priv(dev);
7831
34f80b04
EG
7832 if (!bp->port.pmf)
7833 return 0;
a2fbb9ea 7834
34f80b04 7835 if (netif_running(dev)) {
bb2a0f7a 7836 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7837 bnx2x_link_set(bp);
7838 }
a2fbb9ea
ET
7839
7840 return 0;
7841}
7842
7843static int bnx2x_get_eeprom_len(struct net_device *dev)
7844{
7845 struct bnx2x *bp = netdev_priv(dev);
7846
34f80b04 7847 return bp->common.flash_size;
a2fbb9ea
ET
7848}
7849
7850static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7851{
34f80b04 7852 int port = BP_PORT(bp);
a2fbb9ea
ET
7853 int count, i;
7854 u32 val = 0;
7855
7856 /* adjust timeout for emulation/FPGA */
7857 count = NVRAM_TIMEOUT_COUNT;
7858 if (CHIP_REV_IS_SLOW(bp))
7859 count *= 100;
7860
7861 /* request access to nvram interface */
7862 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7863 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7864
7865 for (i = 0; i < count*10; i++) {
7866 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7867 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7868 break;
7869
7870 udelay(5);
7871 }
7872
7873 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7874 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
7875 return -EBUSY;
7876 }
7877
7878 return 0;
7879}
7880
7881static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7882{
34f80b04 7883 int port = BP_PORT(bp);
a2fbb9ea
ET
7884 int count, i;
7885 u32 val = 0;
7886
7887 /* adjust timeout for emulation/FPGA */
7888 count = NVRAM_TIMEOUT_COUNT;
7889 if (CHIP_REV_IS_SLOW(bp))
7890 count *= 100;
7891
7892 /* relinquish nvram interface */
7893 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7894 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7895
7896 for (i = 0; i < count*10; i++) {
7897 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7898 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7899 break;
7900
7901 udelay(5);
7902 }
7903
7904 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 7905 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
7906 return -EBUSY;
7907 }
7908
7909 return 0;
7910}
7911
7912static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7913{
7914 u32 val;
7915
7916 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7917
7918 /* enable both bits, even on read */
7919 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7920 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7921 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7922}
7923
7924static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7925{
7926 u32 val;
7927
7928 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7929
7930 /* disable both bits, even after read */
7931 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7932 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7933 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7934}
7935
7936static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7937 u32 cmd_flags)
7938{
f1410647 7939 int count, i, rc;
a2fbb9ea
ET
7940 u32 val;
7941
7942 /* build the command word */
7943 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7944
7945 /* need to clear DONE bit separately */
7946 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7947
7948 /* address of the NVRAM to read from */
7949 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7950 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7951
7952 /* issue a read command */
7953 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7954
7955 /* adjust timeout for emulation/FPGA */
7956 count = NVRAM_TIMEOUT_COUNT;
7957 if (CHIP_REV_IS_SLOW(bp))
7958 count *= 100;
7959
7960 /* wait for completion */
7961 *ret_val = 0;
7962 rc = -EBUSY;
7963 for (i = 0; i < count; i++) {
7964 udelay(5);
7965 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7966
7967 if (val & MCPR_NVM_COMMAND_DONE) {
7968 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
7969 /* we read nvram data in cpu order
7970 * but ethtool sees it as an array of bytes
7971 * converting to big-endian will do the work */
7972 val = cpu_to_be32(val);
7973 *ret_val = val;
7974 rc = 0;
7975 break;
7976 }
7977 }
7978
7979 return rc;
7980}
7981
7982static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7983 int buf_size)
7984{
7985 int rc;
7986 u32 cmd_flags;
7987 u32 val;
7988
7989 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 7990 DP(BNX2X_MSG_NVM,
c14423fe 7991 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
7992 offset, buf_size);
7993 return -EINVAL;
7994 }
7995
34f80b04
EG
7996 if (offset + buf_size > bp->common.flash_size) {
7997 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 7998 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 7999 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8000 return -EINVAL;
8001 }
8002
8003 /* request access to nvram interface */
8004 rc = bnx2x_acquire_nvram_lock(bp);
8005 if (rc)
8006 return rc;
8007
8008 /* enable access to nvram interface */
8009 bnx2x_enable_nvram_access(bp);
8010
8011 /* read the first word(s) */
8012 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8013 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8014 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8015 memcpy(ret_buf, &val, 4);
8016
8017 /* advance to the next dword */
8018 offset += sizeof(u32);
8019 ret_buf += sizeof(u32);
8020 buf_size -= sizeof(u32);
8021 cmd_flags = 0;
8022 }
8023
8024 if (rc == 0) {
8025 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8026 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8027 memcpy(ret_buf, &val, 4);
8028 }
8029
8030 /* disable access to nvram interface */
8031 bnx2x_disable_nvram_access(bp);
8032 bnx2x_release_nvram_lock(bp);
8033
8034 return rc;
8035}
8036
8037static int bnx2x_get_eeprom(struct net_device *dev,
8038 struct ethtool_eeprom *eeprom, u8 *eebuf)
8039{
8040 struct bnx2x *bp = netdev_priv(dev);
8041 int rc;
8042
34f80b04 8043 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8044 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8045 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8046 eeprom->len, eeprom->len);
8047
8048 /* parameters already validated in ethtool_get_eeprom */
8049
8050 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8051
8052 return rc;
8053}
8054
8055static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8056 u32 cmd_flags)
8057{
f1410647 8058 int count, i, rc;
a2fbb9ea
ET
8059
8060 /* build the command word */
8061 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8062
8063 /* need to clear DONE bit separately */
8064 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8065
8066 /* write the data */
8067 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8068
8069 /* address of the NVRAM to write to */
8070 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8071 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8072
8073 /* issue the write command */
8074 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8075
8076 /* adjust timeout for emulation/FPGA */
8077 count = NVRAM_TIMEOUT_COUNT;
8078 if (CHIP_REV_IS_SLOW(bp))
8079 count *= 100;
8080
8081 /* wait for completion */
8082 rc = -EBUSY;
8083 for (i = 0; i < count; i++) {
8084 udelay(5);
8085 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8086 if (val & MCPR_NVM_COMMAND_DONE) {
8087 rc = 0;
8088 break;
8089 }
8090 }
8091
8092 return rc;
8093}
8094
f1410647 8095#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8096
8097static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8098 int buf_size)
8099{
8100 int rc;
8101 u32 cmd_flags;
8102 u32 align_offset;
8103 u32 val;
8104
34f80b04
EG
8105 if (offset + buf_size > bp->common.flash_size) {
8106 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8107 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8108 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8109 return -EINVAL;
8110 }
8111
8112 /* request access to nvram interface */
8113 rc = bnx2x_acquire_nvram_lock(bp);
8114 if (rc)
8115 return rc;
8116
8117 /* enable access to nvram interface */
8118 bnx2x_enable_nvram_access(bp);
8119
8120 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8121 align_offset = (offset & ~0x03);
8122 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8123
8124 if (rc == 0) {
8125 val &= ~(0xff << BYTE_OFFSET(offset));
8126 val |= (*data_buf << BYTE_OFFSET(offset));
8127
8128 /* nvram data is returned as an array of bytes
8129 * convert it back to cpu order */
8130 val = be32_to_cpu(val);
8131
a2fbb9ea
ET
8132 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8133 cmd_flags);
8134 }
8135
8136 /* disable access to nvram interface */
8137 bnx2x_disable_nvram_access(bp);
8138 bnx2x_release_nvram_lock(bp);
8139
8140 return rc;
8141}
8142
8143static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8144 int buf_size)
8145{
8146 int rc;
8147 u32 cmd_flags;
8148 u32 val;
8149 u32 written_so_far;
8150
34f80b04 8151 if (buf_size == 1) /* ethtool */
a2fbb9ea 8152 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8153
8154 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8155 DP(BNX2X_MSG_NVM,
c14423fe 8156 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8157 offset, buf_size);
8158 return -EINVAL;
8159 }
8160
34f80b04
EG
8161 if (offset + buf_size > bp->common.flash_size) {
8162 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8163 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8164 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8165 return -EINVAL;
8166 }
8167
8168 /* request access to nvram interface */
8169 rc = bnx2x_acquire_nvram_lock(bp);
8170 if (rc)
8171 return rc;
8172
8173 /* enable access to nvram interface */
8174 bnx2x_enable_nvram_access(bp);
8175
8176 written_so_far = 0;
8177 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8178 while ((written_so_far < buf_size) && (rc == 0)) {
8179 if (written_so_far == (buf_size - sizeof(u32)))
8180 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8181 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8182 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8183 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8184 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8185
8186 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8187
8188 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8189
8190 /* advance to the next dword */
8191 offset += sizeof(u32);
8192 data_buf += sizeof(u32);
8193 written_so_far += sizeof(u32);
8194 cmd_flags = 0;
8195 }
8196
8197 /* disable access to nvram interface */
8198 bnx2x_disable_nvram_access(bp);
8199 bnx2x_release_nvram_lock(bp);
8200
8201 return rc;
8202}
8203
8204static int bnx2x_set_eeprom(struct net_device *dev,
8205 struct ethtool_eeprom *eeprom, u8 *eebuf)
8206{
8207 struct bnx2x *bp = netdev_priv(dev);
8208 int rc;
8209
34f80b04 8210 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8211 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8212 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8213 eeprom->len, eeprom->len);
8214
8215 /* parameters already validated in ethtool_set_eeprom */
8216
c18487ee 8217 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
8218 if (eeprom->magic == 0x00504859)
8219 if (bp->port.pmf) {
8220
4a37fb66 8221 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8222 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8223 bp->link_params.ext_phy_config,
8224 (bp->state != BNX2X_STATE_CLOSED),
8225 eebuf, eeprom->len);
bb2a0f7a
YG
8226 if ((bp->state == BNX2X_STATE_OPEN) ||
8227 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04
EG
8228 rc |= bnx2x_link_reset(&bp->link_params,
8229 &bp->link_vars);
8230 rc |= bnx2x_phy_init(&bp->link_params,
8231 &bp->link_vars);
bb2a0f7a 8232 }
4a37fb66 8233 bnx2x_release_phy_lock(bp);
34f80b04
EG
8234
8235 } else /* Only the PMF can access the PHY */
8236 return -EINVAL;
8237 else
c18487ee 8238 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8239
8240 return rc;
8241}
8242
8243static int bnx2x_get_coalesce(struct net_device *dev,
8244 struct ethtool_coalesce *coal)
8245{
8246 struct bnx2x *bp = netdev_priv(dev);
8247
8248 memset(coal, 0, sizeof(struct ethtool_coalesce));
8249
8250 coal->rx_coalesce_usecs = bp->rx_ticks;
8251 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8252
8253 return 0;
8254}
8255
8256static int bnx2x_set_coalesce(struct net_device *dev,
8257 struct ethtool_coalesce *coal)
8258{
8259 struct bnx2x *bp = netdev_priv(dev);
8260
8261 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8262 if (bp->rx_ticks > 3000)
8263 bp->rx_ticks = 3000;
8264
8265 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8266 if (bp->tx_ticks > 0x3000)
8267 bp->tx_ticks = 0x3000;
8268
34f80b04 8269 if (netif_running(dev))
a2fbb9ea
ET
8270 bnx2x_update_coalesce(bp);
8271
8272 return 0;
8273}
8274
8275static void bnx2x_get_ringparam(struct net_device *dev,
8276 struct ethtool_ringparam *ering)
8277{
8278 struct bnx2x *bp = netdev_priv(dev);
8279
8280 ering->rx_max_pending = MAX_RX_AVAIL;
8281 ering->rx_mini_max_pending = 0;
8282 ering->rx_jumbo_max_pending = 0;
8283
8284 ering->rx_pending = bp->rx_ring_size;
8285 ering->rx_mini_pending = 0;
8286 ering->rx_jumbo_pending = 0;
8287
8288 ering->tx_max_pending = MAX_TX_AVAIL;
8289 ering->tx_pending = bp->tx_ring_size;
8290}
8291
8292static int bnx2x_set_ringparam(struct net_device *dev,
8293 struct ethtool_ringparam *ering)
8294{
8295 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8296 int rc = 0;
a2fbb9ea
ET
8297
8298 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8299 (ering->tx_pending > MAX_TX_AVAIL) ||
8300 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8301 return -EINVAL;
8302
8303 bp->rx_ring_size = ering->rx_pending;
8304 bp->tx_ring_size = ering->tx_pending;
8305
34f80b04
EG
8306 if (netif_running(dev)) {
8307 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8308 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8309 }
8310
34f80b04 8311 return rc;
a2fbb9ea
ET
8312}
8313
8314static void bnx2x_get_pauseparam(struct net_device *dev,
8315 struct ethtool_pauseparam *epause)
8316{
8317 struct bnx2x *bp = netdev_priv(dev);
8318
c18487ee
YR
8319 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8320 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8321
8322 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8323 FLOW_CTRL_RX);
8324 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8325 FLOW_CTRL_TX);
a2fbb9ea
ET
8326
8327 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8328 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8329 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8330}
8331
8332static int bnx2x_set_pauseparam(struct net_device *dev,
8333 struct ethtool_pauseparam *epause)
8334{
8335 struct bnx2x *bp = netdev_priv(dev);
8336
34f80b04
EG
8337 if (IS_E1HMF(bp))
8338 return 0;
8339
a2fbb9ea
ET
8340 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8341 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8342 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8343
c18487ee 8344 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
a2fbb9ea 8345
f1410647 8346 if (epause->rx_pause)
c18487ee
YR
8347 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8348
f1410647 8349 if (epause->tx_pause)
c18487ee
YR
8350 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8351
8352 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8353 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 8354
c18487ee 8355 if (epause->autoneg) {
34f80b04 8356 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
c18487ee
YR
8357 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8358 return -EINVAL;
8359 }
a2fbb9ea 8360
c18487ee
YR
8361 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8362 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8363 }
a2fbb9ea 8364
c18487ee
YR
8365 DP(NETIF_MSG_LINK,
8366 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8367
8368 if (netif_running(dev)) {
bb2a0f7a 8369 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8370 bnx2x_link_set(bp);
8371 }
a2fbb9ea
ET
8372
8373 return 0;
8374}
8375
df0f2343
VZ
8376static int bnx2x_set_flags(struct net_device *dev, u32 data)
8377{
8378 struct bnx2x *bp = netdev_priv(dev);
8379 int changed = 0;
8380 int rc = 0;
8381
8382 /* TPA requires Rx CSUM offloading */
8383 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8384 if (!(dev->features & NETIF_F_LRO)) {
8385 dev->features |= NETIF_F_LRO;
8386 bp->flags |= TPA_ENABLE_FLAG;
8387 changed = 1;
8388 }
8389
8390 } else if (dev->features & NETIF_F_LRO) {
8391 dev->features &= ~NETIF_F_LRO;
8392 bp->flags &= ~TPA_ENABLE_FLAG;
8393 changed = 1;
8394 }
8395
8396 if (changed && netif_running(dev)) {
8397 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8398 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8399 }
8400
8401 return rc;
8402}
8403
a2fbb9ea
ET
8404static u32 bnx2x_get_rx_csum(struct net_device *dev)
8405{
8406 struct bnx2x *bp = netdev_priv(dev);
8407
8408 return bp->rx_csum;
8409}
8410
8411static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8412{
8413 struct bnx2x *bp = netdev_priv(dev);
df0f2343 8414 int rc = 0;
a2fbb9ea
ET
8415
8416 bp->rx_csum = data;
df0f2343
VZ
8417
8418 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8419 TPA'ed packets will be discarded due to wrong TCP CSUM */
8420 if (!data) {
8421 u32 flags = ethtool_op_get_flags(dev);
8422
8423 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8424 }
8425
8426 return rc;
a2fbb9ea
ET
8427}
8428
8429static int bnx2x_set_tso(struct net_device *dev, u32 data)
8430{
755735eb 8431 if (data) {
a2fbb9ea 8432 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8433 dev->features |= NETIF_F_TSO6;
8434 } else {
a2fbb9ea 8435 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8436 dev->features &= ~NETIF_F_TSO6;
8437 }
8438
a2fbb9ea
ET
8439 return 0;
8440}
8441
f3c87cdd 8442static const struct {
a2fbb9ea
ET
8443 char string[ETH_GSTRING_LEN];
8444} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
8445 { "register_test (offline)" },
8446 { "memory_test (offline)" },
8447 { "loopback_test (offline)" },
8448 { "nvram_test (online)" },
8449 { "interrupt_test (online)" },
8450 { "link_test (online)" },
8451 { "idle check (online)" },
8452 { "MC errors (online)" }
a2fbb9ea
ET
8453};
8454
8455static int bnx2x_self_test_count(struct net_device *dev)
8456{
8457 return BNX2X_NUM_TESTS;
8458}
8459
f3c87cdd
YG
8460static int bnx2x_test_registers(struct bnx2x *bp)
8461{
8462 int idx, i, rc = -ENODEV;
8463 u32 wr_val = 0;
9dabc424 8464 int port = BP_PORT(bp);
f3c87cdd
YG
8465 static const struct {
8466 u32 offset0;
8467 u32 offset1;
8468 u32 mask;
8469 } reg_tbl[] = {
8470/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8471 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8472 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8473 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8474 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8475 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8476 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8477 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8478 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8479 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8480/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8481 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8482 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8483 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8484 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8485 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8486 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8487 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8488 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8489 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8490/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8491 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8492 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8493 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8494 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8495 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8496 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8497 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8498 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8499 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8500/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8501 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8502 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8503 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8504 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8505 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8506 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8507 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8508
8509 { 0xffffffff, 0, 0x00000000 }
8510 };
8511
8512 if (!netif_running(bp->dev))
8513 return rc;
8514
8515 /* Repeat the test twice:
8516 First by writing 0x00000000, second by writing 0xffffffff */
8517 for (idx = 0; idx < 2; idx++) {
8518
8519 switch (idx) {
8520 case 0:
8521 wr_val = 0;
8522 break;
8523 case 1:
8524 wr_val = 0xffffffff;
8525 break;
8526 }
8527
8528 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8529 u32 offset, mask, save_val, val;
f3c87cdd
YG
8530
8531 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8532 mask = reg_tbl[i].mask;
8533
8534 save_val = REG_RD(bp, offset);
8535
8536 REG_WR(bp, offset, wr_val);
8537 val = REG_RD(bp, offset);
8538
8539 /* Restore the original register's value */
8540 REG_WR(bp, offset, save_val);
8541
8542 /* verify that value is as expected value */
8543 if ((val & mask) != (wr_val & mask))
8544 goto test_reg_exit;
8545 }
8546 }
8547
8548 rc = 0;
8549
8550test_reg_exit:
8551 return rc;
8552}
8553
8554static int bnx2x_test_memory(struct bnx2x *bp)
8555{
8556 int i, j, rc = -ENODEV;
8557 u32 val;
8558 static const struct {
8559 u32 offset;
8560 int size;
8561 } mem_tbl[] = {
8562 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8563 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8564 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8565 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8566 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8567 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8568 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8569
8570 { 0xffffffff, 0 }
8571 };
8572 static const struct {
8573 char *name;
8574 u32 offset;
9dabc424
YG
8575 u32 e1_mask;
8576 u32 e1h_mask;
f3c87cdd 8577 } prty_tbl[] = {
9dabc424
YG
8578 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8579 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8580 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8581 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8582 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8583 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8584
8585 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
8586 };
8587
8588 if (!netif_running(bp->dev))
8589 return rc;
8590
8591 /* Go through all the memories */
8592 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8593 for (j = 0; j < mem_tbl[i].size; j++)
8594 REG_RD(bp, mem_tbl[i].offset + j*4);
8595
8596 /* Check the parity status */
8597 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8598 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
8599 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8600 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
8601 DP(NETIF_MSG_HW,
8602 "%s is 0x%x\n", prty_tbl[i].name, val);
8603 goto test_mem_exit;
8604 }
8605 }
8606
8607 rc = 0;
8608
8609test_mem_exit:
8610 return rc;
8611}
8612
8613static void bnx2x_netif_start(struct bnx2x *bp)
8614{
8615 int i;
8616
8617 if (atomic_dec_and_test(&bp->intr_sem)) {
8618 if (netif_running(bp->dev)) {
8619 bnx2x_int_enable(bp);
8620 for_each_queue(bp, i)
8621 napi_enable(&bnx2x_fp(bp, i, napi));
8622 if (bp->state == BNX2X_STATE_OPEN)
8623 netif_wake_queue(bp->dev);
8624 }
8625 }
8626}
8627
8628static void bnx2x_netif_stop(struct bnx2x *bp)
8629{
8630 int i;
8631
8632 if (netif_running(bp->dev)) {
8633 netif_tx_disable(bp->dev);
8634 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8635 for_each_queue(bp, i)
8636 napi_disable(&bnx2x_fp(bp, i, napi));
8637 }
8638 bnx2x_int_disable_sync(bp);
8639}
8640
8641static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8642{
8643 int cnt = 1000;
8644
8645 if (link_up)
8646 while (bnx2x_link_test(bp) && cnt--)
8647 msleep(10);
8648}
8649
8650static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8651{
8652 unsigned int pkt_size, num_pkts, i;
8653 struct sk_buff *skb;
8654 unsigned char *packet;
8655 struct bnx2x_fastpath *fp = &bp->fp[0];
8656 u16 tx_start_idx, tx_idx;
8657 u16 rx_start_idx, rx_idx;
8658 u16 pkt_prod;
8659 struct sw_tx_bd *tx_buf;
8660 struct eth_tx_bd *tx_bd;
8661 dma_addr_t mapping;
8662 union eth_rx_cqe *cqe;
8663 u8 cqe_fp_flags;
8664 struct sw_rx_bd *rx_buf;
8665 u16 len;
8666 int rc = -ENODEV;
8667
8668 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8669 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4a37fb66 8670 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8671 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8672 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
8673
8674 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8675 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
4a37fb66 8676 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8677 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8678 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
8679 /* wait until link state is restored */
8680 bnx2x_wait_for_link(bp, link_up);
8681
8682 } else
8683 return -EINVAL;
8684
8685 pkt_size = 1514;
8686 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8687 if (!skb) {
8688 rc = -ENOMEM;
8689 goto test_loopback_exit;
8690 }
8691 packet = skb_put(skb, pkt_size);
8692 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8693 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8694 for (i = ETH_HLEN; i < pkt_size; i++)
8695 packet[i] = (unsigned char) (i & 0xff);
8696
8697 num_pkts = 0;
8698 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8699 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8700
8701 pkt_prod = fp->tx_pkt_prod++;
8702 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8703 tx_buf->first_bd = fp->tx_bd_prod;
8704 tx_buf->skb = skb;
8705
8706 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8707 mapping = pci_map_single(bp->pdev, skb->data,
8708 skb_headlen(skb), PCI_DMA_TODEVICE);
8709 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8710 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8711 tx_bd->nbd = cpu_to_le16(1);
8712 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8713 tx_bd->vlan = cpu_to_le16(pkt_prod);
8714 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8715 ETH_TX_BD_FLAGS_END_BD);
8716 tx_bd->general_data = ((UNICAST_ADDRESS <<
8717 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8718
8719 fp->hw_tx_prods->bds_prod =
8720 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8721 mb(); /* FW restriction: must not reorder writing nbd and packets */
8722 fp->hw_tx_prods->packets_prod =
8723 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8724 DOORBELL(bp, FP_IDX(fp), 0);
8725
8726 mmiowb();
8727
8728 num_pkts++;
8729 fp->tx_bd_prod++;
8730 bp->dev->trans_start = jiffies;
8731
8732 udelay(100);
8733
8734 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8735 if (tx_idx != tx_start_idx + num_pkts)
8736 goto test_loopback_exit;
8737
8738 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8739 if (rx_idx != rx_start_idx + num_pkts)
8740 goto test_loopback_exit;
8741
8742 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8743 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8744 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8745 goto test_loopback_rx_exit;
8746
8747 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8748 if (len != pkt_size)
8749 goto test_loopback_rx_exit;
8750
8751 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8752 skb = rx_buf->skb;
8753 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8754 for (i = ETH_HLEN; i < pkt_size; i++)
8755 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8756 goto test_loopback_rx_exit;
8757
8758 rc = 0;
8759
8760test_loopback_rx_exit:
8761 bp->dev->last_rx = jiffies;
8762
8763 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8764 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8765 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8766 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8767
8768 /* Update producers */
8769 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8770 fp->rx_sge_prod);
8771 mmiowb(); /* keep prod updates ordered */
8772
8773test_loopback_exit:
8774 bp->link_params.loopback_mode = LOOPBACK_NONE;
8775
8776 return rc;
8777}
8778
8779static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8780{
8781 int rc = 0;
8782
8783 if (!netif_running(bp->dev))
8784 return BNX2X_LOOPBACK_FAILED;
8785
8786 bnx2x_netif_stop(bp);
8787
8788 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8789 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8790 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8791 }
8792
8793 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8794 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8795 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8796 }
8797
8798 bnx2x_netif_start(bp);
8799
8800 return rc;
8801}
8802
8803#define CRC32_RESIDUAL 0xdebb20e3
8804
8805static int bnx2x_test_nvram(struct bnx2x *bp)
8806{
8807 static const struct {
8808 int offset;
8809 int size;
8810 } nvram_tbl[] = {
8811 { 0, 0x14 }, /* bootstrap */
8812 { 0x14, 0xec }, /* dir */
8813 { 0x100, 0x350 }, /* manuf_info */
8814 { 0x450, 0xf0 }, /* feature_info */
8815 { 0x640, 0x64 }, /* upgrade_key_info */
8816 { 0x6a4, 0x64 },
8817 { 0x708, 0x70 }, /* manuf_key_info */
8818 { 0x778, 0x70 },
8819 { 0, 0 }
8820 };
8821 u32 buf[0x350 / 4];
8822 u8 *data = (u8 *)buf;
8823 int i, rc;
8824 u32 magic, csum;
8825
8826 rc = bnx2x_nvram_read(bp, 0, data, 4);
8827 if (rc) {
8828 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8829 goto test_nvram_exit;
8830 }
8831
8832 magic = be32_to_cpu(buf[0]);
8833 if (magic != 0x669955aa) {
8834 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8835 rc = -ENODEV;
8836 goto test_nvram_exit;
8837 }
8838
8839 for (i = 0; nvram_tbl[i].size; i++) {
8840
8841 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8842 nvram_tbl[i].size);
8843 if (rc) {
8844 DP(NETIF_MSG_PROBE,
8845 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8846 goto test_nvram_exit;
8847 }
8848
8849 csum = ether_crc_le(nvram_tbl[i].size, data);
8850 if (csum != CRC32_RESIDUAL) {
8851 DP(NETIF_MSG_PROBE,
8852 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8853 rc = -ENODEV;
8854 goto test_nvram_exit;
8855 }
8856 }
8857
8858test_nvram_exit:
8859 return rc;
8860}
8861
8862static int bnx2x_test_intr(struct bnx2x *bp)
8863{
8864 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8865 int i, rc;
8866
8867 if (!netif_running(bp->dev))
8868 return -ENODEV;
8869
8870 config->hdr.length_6b = 0;
8871 config->hdr.offset = 0;
8872 config->hdr.client_id = BP_CL_ID(bp);
8873 config->hdr.reserved1 = 0;
8874
8875 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8876 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8877 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8878 if (rc == 0) {
8879 bp->set_mac_pending++;
8880 for (i = 0; i < 10; i++) {
8881 if (!bp->set_mac_pending)
8882 break;
8883 msleep_interruptible(10);
8884 }
8885 if (i == 10)
8886 rc = -ENODEV;
8887 }
8888
8889 return rc;
8890}
8891
a2fbb9ea
ET
8892static void bnx2x_self_test(struct net_device *dev,
8893 struct ethtool_test *etest, u64 *buf)
8894{
8895 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
8896
8897 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8898
f3c87cdd 8899 if (!netif_running(dev))
a2fbb9ea 8900 return;
a2fbb9ea 8901
f3c87cdd
YG
8902 /* offline tests are not suppoerted in MF mode */
8903 if (IS_E1HMF(bp))
8904 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8905
8906 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8907 u8 link_up;
8908
8909 link_up = bp->link_vars.link_up;
8910 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8911 bnx2x_nic_load(bp, LOAD_DIAG);
8912 /* wait until link state is restored */
8913 bnx2x_wait_for_link(bp, link_up);
8914
8915 if (bnx2x_test_registers(bp) != 0) {
8916 buf[0] = 1;
8917 etest->flags |= ETH_TEST_FL_FAILED;
8918 }
8919 if (bnx2x_test_memory(bp) != 0) {
8920 buf[1] = 1;
8921 etest->flags |= ETH_TEST_FL_FAILED;
8922 }
8923 buf[2] = bnx2x_test_loopback(bp, link_up);
8924 if (buf[2] != 0)
8925 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 8926
f3c87cdd
YG
8927 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8928 bnx2x_nic_load(bp, LOAD_NORMAL);
8929 /* wait until link state is restored */
8930 bnx2x_wait_for_link(bp, link_up);
8931 }
8932 if (bnx2x_test_nvram(bp) != 0) {
8933 buf[3] = 1;
a2fbb9ea
ET
8934 etest->flags |= ETH_TEST_FL_FAILED;
8935 }
f3c87cdd
YG
8936 if (bnx2x_test_intr(bp) != 0) {
8937 buf[4] = 1;
8938 etest->flags |= ETH_TEST_FL_FAILED;
8939 }
8940 if (bp->port.pmf)
8941 if (bnx2x_link_test(bp) != 0) {
8942 buf[5] = 1;
8943 etest->flags |= ETH_TEST_FL_FAILED;
8944 }
8945 buf[7] = bnx2x_mc_assert(bp);
8946 if (buf[7] != 0)
8947 etest->flags |= ETH_TEST_FL_FAILED;
8948
8949#ifdef BNX2X_EXTRA_DEBUG
8950 bnx2x_panic_dump(bp);
8951#endif
a2fbb9ea
ET
8952}
8953
bb2a0f7a
YG
8954static const struct {
8955 long offset;
8956 int size;
8957 u32 flags;
66e855f3
YG
8958#define STATS_FLAGS_PORT 1
8959#define STATS_FLAGS_FUNC 2
8960 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 8961} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
66e855f3
YG
8962/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8963 8, STATS_FLAGS_FUNC, "rx_bytes" },
8964 { STATS_OFFSET32(error_bytes_received_hi),
8965 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8966 { STATS_OFFSET32(total_bytes_transmitted_hi),
8967 8, STATS_FLAGS_FUNC, "tx_bytes" },
8968 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8969 8, STATS_FLAGS_PORT, "tx_error_bytes" },
bb2a0f7a 8970 { STATS_OFFSET32(total_unicast_packets_received_hi),
66e855f3 8971 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
bb2a0f7a 8972 { STATS_OFFSET32(total_multicast_packets_received_hi),
66e855f3 8973 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
bb2a0f7a 8974 { STATS_OFFSET32(total_broadcast_packets_received_hi),
66e855f3 8975 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
bb2a0f7a 8976 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
66e855f3 8977 8, STATS_FLAGS_FUNC, "tx_packets" },
bb2a0f7a 8978 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
66e855f3 8979 8, STATS_FLAGS_PORT, "tx_mac_errors" },
bb2a0f7a 8980/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
66e855f3 8981 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 8982 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 8983 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 8984 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 8985 8, STATS_FLAGS_PORT, "rx_align_errors" },
bb2a0f7a 8986 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 8987 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 8988 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 8989 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
bb2a0f7a 8990 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 8991 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 8992 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 8993 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 8994 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 8995 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 8996 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 8997 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 8998 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
66e855f3
YG
8999 8, STATS_FLAGS_PORT, "rx_fragments" },
9000/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9001 8, STATS_FLAGS_PORT, "rx_jabbers" },
bb2a0f7a 9002 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
66e855f3 9003 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
bb2a0f7a 9004 { STATS_OFFSET32(jabber_packets_received),
66e855f3 9005 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
bb2a0f7a 9006 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9007 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9008 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9009 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9010 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9011 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9012 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9013 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9014 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9015 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9016 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9017 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
bb2a0f7a 9018 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9019 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
bb2a0f7a 9020/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
66e855f3 9021 8, STATS_FLAGS_PORT, "rx_xon_frames" },
bb2a0f7a 9022 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
66e855f3
YG
9023 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9024 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9025 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9026 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9027 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
bb2a0f7a 9028 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
66e855f3
YG
9029 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9030 { STATS_OFFSET32(mac_filter_discard),
9031 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9032 { STATS_OFFSET32(no_buff_discard),
9033 4, STATS_FLAGS_FUNC, "rx_discards" },
9034 { STATS_OFFSET32(xxoverflow_discard),
9035 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9036 { STATS_OFFSET32(brb_drop_hi),
9037 8, STATS_FLAGS_PORT, "brb_discard" },
9038 { STATS_OFFSET32(brb_truncate_hi),
9039 8, STATS_FLAGS_PORT, "brb_truncate" },
9040/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9041 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9042 { STATS_OFFSET32(rx_skb_alloc_failed),
9043 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9044/* 42 */{ STATS_OFFSET32(hw_csum_err),
9045 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
a2fbb9ea
ET
9046};
9047
66e855f3
YG
9048#define IS_NOT_E1HMF_STAT(bp, i) \
9049 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9050
a2fbb9ea
ET
9051static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9052{
bb2a0f7a
YG
9053 struct bnx2x *bp = netdev_priv(dev);
9054 int i, j;
9055
a2fbb9ea
ET
9056 switch (stringset) {
9057 case ETH_SS_STATS:
bb2a0f7a 9058 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9059 if (IS_NOT_E1HMF_STAT(bp, i))
bb2a0f7a
YG
9060 continue;
9061 strcpy(buf + j*ETH_GSTRING_LEN,
9062 bnx2x_stats_arr[i].string);
9063 j++;
9064 }
a2fbb9ea
ET
9065 break;
9066
9067 case ETH_SS_TEST:
9068 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9069 break;
9070 }
9071}
9072
9073static int bnx2x_get_stats_count(struct net_device *dev)
9074{
bb2a0f7a
YG
9075 struct bnx2x *bp = netdev_priv(dev);
9076 int i, num_stats = 0;
9077
9078 for (i = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9079 if (IS_NOT_E1HMF_STAT(bp, i))
bb2a0f7a
YG
9080 continue;
9081 num_stats++;
9082 }
9083 return num_stats;
a2fbb9ea
ET
9084}
9085
9086static void bnx2x_get_ethtool_stats(struct net_device *dev,
9087 struct ethtool_stats *stats, u64 *buf)
9088{
9089 struct bnx2x *bp = netdev_priv(dev);
bb2a0f7a
YG
9090 u32 *hw_stats = (u32 *)&bp->eth_stats;
9091 int i, j;
a2fbb9ea 9092
bb2a0f7a 9093 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9094 if (IS_NOT_E1HMF_STAT(bp, i))
a2fbb9ea 9095 continue;
bb2a0f7a
YG
9096
9097 if (bnx2x_stats_arr[i].size == 0) {
9098 /* skip this counter */
9099 buf[j] = 0;
9100 j++;
a2fbb9ea
ET
9101 continue;
9102 }
bb2a0f7a 9103 if (bnx2x_stats_arr[i].size == 4) {
a2fbb9ea 9104 /* 4-byte counter */
bb2a0f7a
YG
9105 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9106 j++;
a2fbb9ea
ET
9107 continue;
9108 }
9109 /* 8-byte counter */
bb2a0f7a
YG
9110 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9111 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9112 j++;
a2fbb9ea
ET
9113 }
9114}
9115
9116static int bnx2x_phys_id(struct net_device *dev, u32 data)
9117{
9118 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9119 int port = BP_PORT(bp);
a2fbb9ea
ET
9120 int i;
9121
34f80b04
EG
9122 if (!netif_running(dev))
9123 return 0;
9124
9125 if (!bp->port.pmf)
9126 return 0;
9127
a2fbb9ea
ET
9128 if (data == 0)
9129 data = 2;
9130
9131 for (i = 0; i < (data * 2); i++) {
c18487ee 9132 if ((i % 2) == 0)
34f80b04 9133 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
9134 bp->link_params.hw_led_mode,
9135 bp->link_params.chip_id);
9136 else
34f80b04 9137 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
9138 bp->link_params.hw_led_mode,
9139 bp->link_params.chip_id);
9140
a2fbb9ea
ET
9141 msleep_interruptible(500);
9142 if (signal_pending(current))
9143 break;
9144 }
9145
c18487ee 9146 if (bp->link_vars.link_up)
34f80b04 9147 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
9148 bp->link_vars.line_speed,
9149 bp->link_params.hw_led_mode,
9150 bp->link_params.chip_id);
a2fbb9ea
ET
9151
9152 return 0;
9153}
9154
9155static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
9156 .get_settings = bnx2x_get_settings,
9157 .set_settings = bnx2x_set_settings,
9158 .get_drvinfo = bnx2x_get_drvinfo,
a2fbb9ea
ET
9159 .get_wol = bnx2x_get_wol,
9160 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
9161 .get_msglevel = bnx2x_get_msglevel,
9162 .set_msglevel = bnx2x_set_msglevel,
9163 .nway_reset = bnx2x_nway_reset,
9164 .get_link = ethtool_op_get_link,
9165 .get_eeprom_len = bnx2x_get_eeprom_len,
9166 .get_eeprom = bnx2x_get_eeprom,
9167 .set_eeprom = bnx2x_set_eeprom,
9168 .get_coalesce = bnx2x_get_coalesce,
9169 .set_coalesce = bnx2x_set_coalesce,
9170 .get_ringparam = bnx2x_get_ringparam,
9171 .set_ringparam = bnx2x_set_ringparam,
9172 .get_pauseparam = bnx2x_get_pauseparam,
9173 .set_pauseparam = bnx2x_set_pauseparam,
9174 .get_rx_csum = bnx2x_get_rx_csum,
9175 .set_rx_csum = bnx2x_set_rx_csum,
9176 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9177 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
9178 .set_flags = bnx2x_set_flags,
9179 .get_flags = ethtool_op_get_flags,
9180 .get_sg = ethtool_op_get_sg,
9181 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
9182 .get_tso = ethtool_op_get_tso,
9183 .set_tso = bnx2x_set_tso,
9184 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
9185 .self_test = bnx2x_self_test,
9186 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
9187 .phys_id = bnx2x_phys_id,
9188 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9189 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
9190};
9191
9192/* end of ethtool_ops */
9193
9194/****************************************************************************
9195* General service functions
9196****************************************************************************/
9197
9198static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9199{
9200 u16 pmcsr;
9201
9202 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9203
9204 switch (state) {
9205 case PCI_D0:
34f80b04 9206 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
9207 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9208 PCI_PM_CTRL_PME_STATUS));
9209
9210 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9211 /* delay required during transition out of D3hot */
9212 msleep(20);
34f80b04 9213 break;
a2fbb9ea 9214
34f80b04
EG
9215 case PCI_D3hot:
9216 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9217 pmcsr |= 3;
a2fbb9ea 9218
34f80b04
EG
9219 if (bp->wol)
9220 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9221
34f80b04
EG
9222 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9223 pmcsr);
a2fbb9ea 9224
34f80b04
EG
9225 /* No more memory access after this point until
9226 * device is brought back to D0.
9227 */
9228 break;
9229
9230 default:
9231 return -EINVAL;
9232 }
9233 return 0;
a2fbb9ea
ET
9234}
9235
34f80b04
EG
9236/*
9237 * net_device service functions
9238 */
9239
a2fbb9ea
ET
9240static int bnx2x_poll(struct napi_struct *napi, int budget)
9241{
9242 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9243 napi);
9244 struct bnx2x *bp = fp->bp;
9245 int work_done = 0;
9246
9247#ifdef BNX2X_STOP_ON_ERROR
9248 if (unlikely(bp->panic))
34f80b04 9249 goto poll_panic;
a2fbb9ea
ET
9250#endif
9251
9252 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9253 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9254 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9255
9256 bnx2x_update_fpsb_idx(fp);
9257
da5a662a 9258 if (BNX2X_HAS_TX_WORK(fp))
a2fbb9ea
ET
9259 bnx2x_tx_int(fp, budget);
9260
da5a662a 9261 if (BNX2X_HAS_RX_WORK(fp))
a2fbb9ea
ET
9262 work_done = bnx2x_rx_int(fp, budget);
9263
da5a662a 9264 rmb(); /* BNX2X_HAS_WORK() reads the status block */
a2fbb9ea
ET
9265
9266 /* must not complete if we consumed full budget */
da5a662a 9267 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
a2fbb9ea
ET
9268
9269#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9270poll_panic:
a2fbb9ea
ET
9271#endif
9272 netif_rx_complete(bp->dev, napi);
9273
34f80b04 9274 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9275 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9276 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
a2fbb9ea
ET
9277 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9278 }
a2fbb9ea
ET
9279 return work_done;
9280}
9281
755735eb
EG
9282
9283/* we split the first BD into headers and data BDs
9284 * to ease the pain of our fellow micocode engineers
9285 * we use one mapping for both BDs
9286 * So far this has only been observed to happen
9287 * in Other Operating Systems(TM)
9288 */
9289static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9290 struct bnx2x_fastpath *fp,
9291 struct eth_tx_bd **tx_bd, u16 hlen,
9292 u16 bd_prod, int nbd)
9293{
9294 struct eth_tx_bd *h_tx_bd = *tx_bd;
9295 struct eth_tx_bd *d_tx_bd;
9296 dma_addr_t mapping;
9297 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9298
9299 /* first fix first BD */
9300 h_tx_bd->nbd = cpu_to_le16(nbd);
9301 h_tx_bd->nbytes = cpu_to_le16(hlen);
9302
9303 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9304 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9305 h_tx_bd->addr_lo, h_tx_bd->nbd);
9306
9307 /* now get a new data BD
9308 * (after the pbd) and fill it */
9309 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9310 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9311
9312 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9313 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9314
9315 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9316 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9317 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9318 d_tx_bd->vlan = 0;
9319 /* this marks the BD as one that has no individual mapping
9320 * the FW ignores this flag in a BD not marked start
9321 */
9322 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9323 DP(NETIF_MSG_TX_QUEUED,
9324 "TSO split data size is %d (%x:%x)\n",
9325 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9326
9327 /* update tx_bd for marking the last BD flag */
9328 *tx_bd = d_tx_bd;
9329
9330 return bd_prod;
9331}
9332
9333static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9334{
9335 if (fix > 0)
9336 csum = (u16) ~csum_fold(csum_sub(csum,
9337 csum_partial(t_header - fix, fix, 0)));
9338
9339 else if (fix < 0)
9340 csum = (u16) ~csum_fold(csum_add(csum,
9341 csum_partial(t_header, -fix, 0)));
9342
9343 return swab16(csum);
9344}
9345
9346static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9347{
9348 u32 rc;
9349
9350 if (skb->ip_summed != CHECKSUM_PARTIAL)
9351 rc = XMIT_PLAIN;
9352
9353 else {
9354 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9355 rc = XMIT_CSUM_V6;
9356 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9357 rc |= XMIT_CSUM_TCP;
9358
9359 } else {
9360 rc = XMIT_CSUM_V4;
9361 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9362 rc |= XMIT_CSUM_TCP;
9363 }
9364 }
9365
9366 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9367 rc |= XMIT_GSO_V4;
9368
9369 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9370 rc |= XMIT_GSO_V6;
9371
9372 return rc;
9373}
9374
9375/* check if packet requires linearization (packet is too fragmented) */
9376static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9377 u32 xmit_type)
9378{
9379 int to_copy = 0;
9380 int hlen = 0;
9381 int first_bd_sz = 0;
9382
9383 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9384 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9385
9386 if (xmit_type & XMIT_GSO) {
9387 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9388 /* Check if LSO packet needs to be copied:
9389 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9390 int wnd_size = MAX_FETCH_BD - 3;
9391 /* Number of widnows to check */
9392 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9393 int wnd_idx = 0;
9394 int frag_idx = 0;
9395 u32 wnd_sum = 0;
9396
9397 /* Headers length */
9398 hlen = (int)(skb_transport_header(skb) - skb->data) +
9399 tcp_hdrlen(skb);
9400
9401 /* Amount of data (w/o headers) on linear part of SKB*/
9402 first_bd_sz = skb_headlen(skb) - hlen;
9403
9404 wnd_sum = first_bd_sz;
9405
9406 /* Calculate the first sum - it's special */
9407 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9408 wnd_sum +=
9409 skb_shinfo(skb)->frags[frag_idx].size;
9410
9411 /* If there was data on linear skb data - check it */
9412 if (first_bd_sz > 0) {
9413 if (unlikely(wnd_sum < lso_mss)) {
9414 to_copy = 1;
9415 goto exit_lbl;
9416 }
9417
9418 wnd_sum -= first_bd_sz;
9419 }
9420
9421 /* Others are easier: run through the frag list and
9422 check all windows */
9423 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9424 wnd_sum +=
9425 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9426
9427 if (unlikely(wnd_sum < lso_mss)) {
9428 to_copy = 1;
9429 break;
9430 }
9431 wnd_sum -=
9432 skb_shinfo(skb)->frags[wnd_idx].size;
9433 }
9434
9435 } else {
9436 /* in non-LSO too fragmented packet should always
9437 be linearized */
9438 to_copy = 1;
9439 }
9440 }
9441
9442exit_lbl:
9443 if (unlikely(to_copy))
9444 DP(NETIF_MSG_TX_QUEUED,
9445 "Linearization IS REQUIRED for %s packet. "
9446 "num_frags %d hlen %d first_bd_sz %d\n",
9447 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9448 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9449
9450 return to_copy;
9451}
9452
9453/* called with netif_tx_lock
a2fbb9ea 9454 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 9455 * netif_wake_queue()
a2fbb9ea
ET
9456 */
9457static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9458{
9459 struct bnx2x *bp = netdev_priv(dev);
9460 struct bnx2x_fastpath *fp;
9461 struct sw_tx_bd *tx_buf;
9462 struct eth_tx_bd *tx_bd;
9463 struct eth_tx_parse_bd *pbd = NULL;
9464 u16 pkt_prod, bd_prod;
755735eb 9465 int nbd, fp_index;
a2fbb9ea 9466 dma_addr_t mapping;
755735eb
EG
9467 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9468 int vlan_off = (bp->e1hov ? 4 : 0);
9469 int i;
9470 u8 hlen = 0;
a2fbb9ea
ET
9471
9472#ifdef BNX2X_STOP_ON_ERROR
9473 if (unlikely(bp->panic))
9474 return NETDEV_TX_BUSY;
9475#endif
9476
755735eb 9477 fp_index = (smp_processor_id() % bp->num_queues);
a2fbb9ea 9478 fp = &bp->fp[fp_index];
755735eb 9479
a2fbb9ea
ET
9480 if (unlikely(bnx2x_tx_avail(bp->fp) <
9481 (skb_shinfo(skb)->nr_frags + 3))) {
bb2a0f7a 9482 bp->eth_stats.driver_xoff++,
a2fbb9ea
ET
9483 netif_stop_queue(dev);
9484 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9485 return NETDEV_TX_BUSY;
9486 }
9487
755735eb
EG
9488 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9489 " gso type %x xmit_type %x\n",
9490 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9491 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9492
9493 /* First, check if we need to linearaize the skb
9494 (due to FW restrictions) */
9495 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9496 /* Statistics of linearization */
9497 bp->lin_cnt++;
9498 if (skb_linearize(skb) != 0) {
9499 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9500 "silently dropping this SKB\n");
9501 dev_kfree_skb_any(skb);
da5a662a 9502 return NETDEV_TX_OK;
755735eb
EG
9503 }
9504 }
9505
a2fbb9ea 9506 /*
755735eb 9507 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 9508 then for TSO or xsum we have a parsing info BD,
755735eb 9509 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
9510 (don't forget to mark the last one as last,
9511 and to unmap only AFTER you write to the BD ...)
755735eb 9512 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
9513 */
9514
9515 pkt_prod = fp->tx_pkt_prod++;
755735eb 9516 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 9517
755735eb 9518 /* get a tx_buf and first BD */
a2fbb9ea
ET
9519 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9520 tx_bd = &fp->tx_desc_ring[bd_prod];
9521
9522 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9523 tx_bd->general_data = (UNICAST_ADDRESS <<
9524 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9525 tx_bd->general_data |= 1; /* header nbd */
9526
755735eb
EG
9527 /* remember the first BD of the packet */
9528 tx_buf->first_bd = fp->tx_bd_prod;
9529 tx_buf->skb = skb;
a2fbb9ea
ET
9530
9531 DP(NETIF_MSG_TX_QUEUED,
9532 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9533 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9534
755735eb
EG
9535 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9536 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9537 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9538 vlan_off += 4;
9539 } else
9540 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 9541
755735eb 9542 if (xmit_type) {
a2fbb9ea 9543
755735eb 9544 /* turn on parsing and get a BD */
a2fbb9ea
ET
9545 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9546 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
9547
9548 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9549 }
9550
9551 if (xmit_type & XMIT_CSUM) {
9552 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
9553
9554 /* for now NS flag is not used in Linux */
755735eb 9555 pbd->global_data = (hlen |
96fc1784 9556 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 9557 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 9558
755735eb
EG
9559 pbd->ip_hlen = (skb_transport_header(skb) -
9560 skb_network_header(skb)) / 2;
9561
9562 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 9563
755735eb
EG
9564 pbd->total_hlen = cpu_to_le16(hlen);
9565 hlen = hlen*2 - vlan_off;
a2fbb9ea 9566
755735eb
EG
9567 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9568
9569 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 9570 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
9571 ETH_TX_BD_FLAGS_IP_CSUM;
9572 else
9573 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9574
9575 if (xmit_type & XMIT_CSUM_TCP) {
9576 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9577
9578 } else {
9579 s8 fix = SKB_CS_OFF(skb); /* signed! */
9580
a2fbb9ea 9581 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 9582 pbd->cs_offset = fix / 2;
a2fbb9ea 9583
755735eb
EG
9584 DP(NETIF_MSG_TX_QUEUED,
9585 "hlen %d offset %d fix %d csum before fix %x\n",
9586 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9587 SKB_CS(skb));
9588
9589 /* HW bug: fixup the CSUM */
9590 pbd->tcp_pseudo_csum =
9591 bnx2x_csum_fix(skb_transport_header(skb),
9592 SKB_CS(skb), fix);
9593
9594 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9595 pbd->tcp_pseudo_csum);
9596 }
a2fbb9ea
ET
9597 }
9598
9599 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 9600 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
9601
9602 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9603 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9604 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9605 tx_bd->nbd = cpu_to_le16(nbd);
9606 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9607
9608 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
9609 " nbytes %d flags %x vlan %x\n",
9610 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9611 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9612 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 9613
755735eb 9614 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
9615
9616 DP(NETIF_MSG_TX_QUEUED,
9617 "TSO packet len %d hlen %d total len %d tso size %d\n",
9618 skb->len, hlen, skb_headlen(skb),
9619 skb_shinfo(skb)->gso_size);
9620
9621 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9622
755735eb
EG
9623 if (unlikely(skb_headlen(skb) > hlen))
9624 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9625 bd_prod, ++nbd);
a2fbb9ea
ET
9626
9627 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9628 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
9629 pbd->tcp_flags = pbd_tcp_flags(skb);
9630
9631 if (xmit_type & XMIT_GSO_V4) {
9632 pbd->ip_id = swab16(ip_hdr(skb)->id);
9633 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
9634 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9635 ip_hdr(skb)->daddr,
9636 0, IPPROTO_TCP, 0));
755735eb
EG
9637
9638 } else
9639 pbd->tcp_pseudo_csum =
9640 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9641 &ipv6_hdr(skb)->daddr,
9642 0, IPPROTO_TCP, 0));
9643
a2fbb9ea
ET
9644 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9645 }
9646
755735eb
EG
9647 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9648 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 9649
755735eb
EG
9650 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9651 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 9652
755735eb
EG
9653 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9654 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 9655
755735eb
EG
9656 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9657 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9658 tx_bd->nbytes = cpu_to_le16(frag->size);
9659 tx_bd->vlan = cpu_to_le16(pkt_prod);
9660 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 9661
755735eb
EG
9662 DP(NETIF_MSG_TX_QUEUED,
9663 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9664 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9665 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
9666 }
9667
755735eb 9668 /* now at last mark the BD as the last BD */
a2fbb9ea
ET
9669 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9670
9671 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9672 tx_bd, tx_bd->bd_flags.as_bitfield);
9673
a2fbb9ea
ET
9674 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9675
755735eb 9676 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
9677 * if the packet contains or ends with it
9678 */
9679 if (TX_BD_POFF(bd_prod) < nbd)
9680 nbd++;
9681
9682 if (pbd)
9683 DP(NETIF_MSG_TX_QUEUED,
9684 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9685 " tcp_flags %x xsum %x seq %u hlen %u\n",
9686 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9687 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 9688 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 9689
755735eb 9690 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 9691
96fc1784
ET
9692 fp->hw_tx_prods->bds_prod =
9693 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 9694 mb(); /* FW restriction: must not reorder writing nbd and packets */
96fc1784
ET
9695 fp->hw_tx_prods->packets_prod =
9696 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 9697 DOORBELL(bp, FP_IDX(fp), 0);
a2fbb9ea
ET
9698
9699 mmiowb();
9700
755735eb 9701 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
9702 dev->trans_start = jiffies;
9703
9704 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9705 netif_stop_queue(dev);
bb2a0f7a 9706 bp->eth_stats.driver_xoff++;
a2fbb9ea
ET
9707 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9708 netif_wake_queue(dev);
9709 }
9710 fp->tx_pkt++;
9711
9712 return NETDEV_TX_OK;
9713}
9714
bb2a0f7a 9715/* called with rtnl_lock */
a2fbb9ea
ET
9716static int bnx2x_open(struct net_device *dev)
9717{
9718 struct bnx2x *bp = netdev_priv(dev);
9719
9720 bnx2x_set_power_state(bp, PCI_D0);
9721
bb2a0f7a 9722 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
9723}
9724
bb2a0f7a 9725/* called with rtnl_lock */
a2fbb9ea
ET
9726static int bnx2x_close(struct net_device *dev)
9727{
a2fbb9ea
ET
9728 struct bnx2x *bp = netdev_priv(dev);
9729
9730 /* Unload the driver, release IRQs */
bb2a0f7a
YG
9731 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9732 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9733 if (!CHIP_REV_IS_SLOW(bp))
9734 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
9735
9736 return 0;
9737}
9738
34f80b04
EG
9739/* called with netif_tx_lock from set_multicast */
9740static void bnx2x_set_rx_mode(struct net_device *dev)
9741{
9742 struct bnx2x *bp = netdev_priv(dev);
9743 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9744 int port = BP_PORT(bp);
9745
9746 if (bp->state != BNX2X_STATE_OPEN) {
9747 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9748 return;
9749 }
9750
9751 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9752
9753 if (dev->flags & IFF_PROMISC)
9754 rx_mode = BNX2X_RX_MODE_PROMISC;
9755
9756 else if ((dev->flags & IFF_ALLMULTI) ||
9757 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9758 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9759
9760 else { /* some multicasts */
9761 if (CHIP_IS_E1(bp)) {
9762 int i, old, offset;
9763 struct dev_mc_list *mclist;
9764 struct mac_configuration_cmd *config =
9765 bnx2x_sp(bp, mcast_config);
9766
9767 for (i = 0, mclist = dev->mc_list;
9768 mclist && (i < dev->mc_count);
9769 i++, mclist = mclist->next) {
9770
9771 config->config_table[i].
9772 cam_entry.msb_mac_addr =
9773 swab16(*(u16 *)&mclist->dmi_addr[0]);
9774 config->config_table[i].
9775 cam_entry.middle_mac_addr =
9776 swab16(*(u16 *)&mclist->dmi_addr[2]);
9777 config->config_table[i].
9778 cam_entry.lsb_mac_addr =
9779 swab16(*(u16 *)&mclist->dmi_addr[4]);
9780 config->config_table[i].cam_entry.flags =
9781 cpu_to_le16(port);
9782 config->config_table[i].
9783 target_table_entry.flags = 0;
9784 config->config_table[i].
9785 target_table_entry.client_id = 0;
9786 config->config_table[i].
9787 target_table_entry.vlan_id = 0;
9788
9789 DP(NETIF_MSG_IFUP,
9790 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9791 config->config_table[i].
9792 cam_entry.msb_mac_addr,
9793 config->config_table[i].
9794 cam_entry.middle_mac_addr,
9795 config->config_table[i].
9796 cam_entry.lsb_mac_addr);
9797 }
9798 old = config->hdr.length_6b;
9799 if (old > i) {
9800 for (; i < old; i++) {
9801 if (CAM_IS_INVALID(config->
9802 config_table[i])) {
9803 i--; /* already invalidated */
9804 break;
9805 }
9806 /* invalidate */
9807 CAM_INVALIDATE(config->
9808 config_table[i]);
9809 }
9810 }
9811
9812 if (CHIP_REV_IS_SLOW(bp))
9813 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9814 else
9815 offset = BNX2X_MAX_MULTICAST*(1 + port);
9816
9817 config->hdr.length_6b = i;
9818 config->hdr.offset = offset;
9819 config->hdr.client_id = BP_CL_ID(bp);
9820 config->hdr.reserved1 = 0;
9821
9822 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9823 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9824 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9825 0);
9826 } else { /* E1H */
9827 /* Accept one or more multicasts */
9828 struct dev_mc_list *mclist;
9829 u32 mc_filter[MC_HASH_SIZE];
9830 u32 crc, bit, regidx;
9831 int i;
9832
9833 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9834
9835 for (i = 0, mclist = dev->mc_list;
9836 mclist && (i < dev->mc_count);
9837 i++, mclist = mclist->next) {
9838
9839 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9840 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9841 mclist->dmi_addr[0], mclist->dmi_addr[1],
9842 mclist->dmi_addr[2], mclist->dmi_addr[3],
9843 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9844
9845 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9846 bit = (crc >> 24) & 0xff;
9847 regidx = bit >> 5;
9848 bit &= 0x1f;
9849 mc_filter[regidx] |= (1 << bit);
9850 }
9851
9852 for (i = 0; i < MC_HASH_SIZE; i++)
9853 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9854 mc_filter[i]);
9855 }
9856 }
9857
9858 bp->rx_mode = rx_mode;
9859 bnx2x_set_storm_rx_mode(bp);
9860}
9861
9862/* called with rtnl_lock */
a2fbb9ea
ET
9863static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9864{
9865 struct sockaddr *addr = p;
9866 struct bnx2x *bp = netdev_priv(dev);
9867
34f80b04 9868 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
9869 return -EINVAL;
9870
9871 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
9872 if (netif_running(dev)) {
9873 if (CHIP_IS_E1(bp))
3101c2bc 9874 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 9875 else
3101c2bc 9876 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 9877 }
a2fbb9ea
ET
9878
9879 return 0;
9880}
9881
c18487ee 9882/* called with rtnl_lock */
a2fbb9ea
ET
9883static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9884{
9885 struct mii_ioctl_data *data = if_mii(ifr);
9886 struct bnx2x *bp = netdev_priv(dev);
9887 int err;
9888
9889 switch (cmd) {
9890 case SIOCGMIIPHY:
34f80b04 9891 data->phy_id = bp->port.phy_addr;
a2fbb9ea 9892
c14423fe 9893 /* fallthrough */
c18487ee 9894
a2fbb9ea 9895 case SIOCGMIIREG: {
c18487ee 9896 u16 mii_regval;
a2fbb9ea 9897
c18487ee
YR
9898 if (!netif_running(dev))
9899 return -EAGAIN;
a2fbb9ea 9900
34f80b04
EG
9901 mutex_lock(&bp->port.phy_mutex);
9902 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
c18487ee
YR
9903 DEFAULT_PHY_DEV_ADDR,
9904 (data->reg_num & 0x1f), &mii_regval);
9905 data->val_out = mii_regval;
34f80b04 9906 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
9907 return err;
9908 }
9909
9910 case SIOCSMIIREG:
9911 if (!capable(CAP_NET_ADMIN))
9912 return -EPERM;
9913
c18487ee
YR
9914 if (!netif_running(dev))
9915 return -EAGAIN;
9916
34f80b04
EG
9917 mutex_lock(&bp->port.phy_mutex);
9918 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
c18487ee
YR
9919 DEFAULT_PHY_DEV_ADDR,
9920 (data->reg_num & 0x1f), data->val_in);
34f80b04 9921 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
9922 return err;
9923
9924 default:
9925 /* do nothing */
9926 break;
9927 }
9928
9929 return -EOPNOTSUPP;
9930}
9931
34f80b04 9932/* called with rtnl_lock */
a2fbb9ea
ET
9933static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9934{
9935 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9936 int rc = 0;
a2fbb9ea
ET
9937
9938 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9939 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9940 return -EINVAL;
9941
9942 /* This does not race with packet allocation
c14423fe 9943 * because the actual alloc size is
a2fbb9ea
ET
9944 * only updated as part of load
9945 */
9946 dev->mtu = new_mtu;
9947
9948 if (netif_running(dev)) {
34f80b04
EG
9949 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9950 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 9951 }
34f80b04
EG
9952
9953 return rc;
a2fbb9ea
ET
9954}
9955
9956static void bnx2x_tx_timeout(struct net_device *dev)
9957{
9958 struct bnx2x *bp = netdev_priv(dev);
9959
9960#ifdef BNX2X_STOP_ON_ERROR
9961 if (!bp->panic)
9962 bnx2x_panic();
9963#endif
9964 /* This allows the netif to be shutdown gracefully before resetting */
9965 schedule_work(&bp->reset_task);
9966}
9967
9968#ifdef BCM_VLAN
34f80b04 9969/* called with rtnl_lock */
a2fbb9ea
ET
9970static void bnx2x_vlan_rx_register(struct net_device *dev,
9971 struct vlan_group *vlgrp)
9972{
9973 struct bnx2x *bp = netdev_priv(dev);
9974
9975 bp->vlgrp = vlgrp;
9976 if (netif_running(dev))
49d66772 9977 bnx2x_set_client_config(bp);
a2fbb9ea 9978}
34f80b04 9979
a2fbb9ea
ET
9980#endif
9981
9982#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9983static void poll_bnx2x(struct net_device *dev)
9984{
9985 struct bnx2x *bp = netdev_priv(dev);
9986
9987 disable_irq(bp->pdev->irq);
9988 bnx2x_interrupt(bp->pdev->irq, dev);
9989 enable_irq(bp->pdev->irq);
9990}
9991#endif
9992
34f80b04
EG
9993static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9994 struct net_device *dev)
a2fbb9ea
ET
9995{
9996 struct bnx2x *bp;
9997 int rc;
9998
9999 SET_NETDEV_DEV(dev, &pdev->dev);
10000 bp = netdev_priv(dev);
10001
34f80b04
EG
10002 bp->dev = dev;
10003 bp->pdev = pdev;
a2fbb9ea 10004 bp->flags = 0;
34f80b04 10005 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
10006
10007 rc = pci_enable_device(pdev);
10008 if (rc) {
10009 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10010 goto err_out;
10011 }
10012
10013 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10014 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10015 " aborting\n");
10016 rc = -ENODEV;
10017 goto err_out_disable;
10018 }
10019
10020 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10021 printk(KERN_ERR PFX "Cannot find second PCI device"
10022 " base address, aborting\n");
10023 rc = -ENODEV;
10024 goto err_out_disable;
10025 }
10026
34f80b04
EG
10027 if (atomic_read(&pdev->enable_cnt) == 1) {
10028 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10029 if (rc) {
10030 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10031 " aborting\n");
10032 goto err_out_disable;
10033 }
a2fbb9ea 10034
34f80b04
EG
10035 pci_set_master(pdev);
10036 pci_save_state(pdev);
10037 }
a2fbb9ea
ET
10038
10039 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10040 if (bp->pm_cap == 0) {
10041 printk(KERN_ERR PFX "Cannot find power management"
10042 " capability, aborting\n");
10043 rc = -EIO;
10044 goto err_out_release;
10045 }
10046
10047 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10048 if (bp->pcie_cap == 0) {
10049 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10050 " aborting\n");
10051 rc = -EIO;
10052 goto err_out_release;
10053 }
10054
10055 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10056 bp->flags |= USING_DAC_FLAG;
10057 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10058 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10059 " failed, aborting\n");
10060 rc = -EIO;
10061 goto err_out_release;
10062 }
10063
10064 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10065 printk(KERN_ERR PFX "System does not support DMA,"
10066 " aborting\n");
10067 rc = -EIO;
10068 goto err_out_release;
10069 }
10070
34f80b04
EG
10071 dev->mem_start = pci_resource_start(pdev, 0);
10072 dev->base_addr = dev->mem_start;
10073 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
10074
10075 dev->irq = pdev->irq;
10076
10077 bp->regview = ioremap_nocache(dev->base_addr,
10078 pci_resource_len(pdev, 0));
10079 if (!bp->regview) {
10080 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10081 rc = -ENOMEM;
10082 goto err_out_release;
10083 }
10084
34f80b04
EG
10085 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10086 min_t(u64, BNX2X_DB_SIZE,
10087 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
10088 if (!bp->doorbells) {
10089 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10090 rc = -ENOMEM;
10091 goto err_out_unmap;
10092 }
10093
10094 bnx2x_set_power_state(bp, PCI_D0);
10095
34f80b04
EG
10096 /* clean indirect addresses */
10097 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10098 PCICFG_VENDOR_ID_OFFSET);
10099 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10100 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10101 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10102 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10103
34f80b04
EG
10104 dev->hard_start_xmit = bnx2x_start_xmit;
10105 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10106
34f80b04
EG
10107 dev->ethtool_ops = &bnx2x_ethtool_ops;
10108 dev->open = bnx2x_open;
10109 dev->stop = bnx2x_close;
10110 dev->set_multicast_list = bnx2x_set_rx_mode;
10111 dev->set_mac_address = bnx2x_change_mac_addr;
10112 dev->do_ioctl = bnx2x_ioctl;
10113 dev->change_mtu = bnx2x_change_mtu;
10114 dev->tx_timeout = bnx2x_tx_timeout;
10115#ifdef BCM_VLAN
10116 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10117#endif
10118#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10119 dev->poll_controller = poll_bnx2x;
10120#endif
10121 dev->features |= NETIF_F_SG;
10122 dev->features |= NETIF_F_HW_CSUM;
10123 if (bp->flags & USING_DAC_FLAG)
10124 dev->features |= NETIF_F_HIGHDMA;
10125#ifdef BCM_VLAN
10126 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10127#endif
10128 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10129 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
10130
10131 return 0;
10132
10133err_out_unmap:
10134 if (bp->regview) {
10135 iounmap(bp->regview);
10136 bp->regview = NULL;
10137 }
a2fbb9ea
ET
10138 if (bp->doorbells) {
10139 iounmap(bp->doorbells);
10140 bp->doorbells = NULL;
10141 }
10142
10143err_out_release:
34f80b04
EG
10144 if (atomic_read(&pdev->enable_cnt) == 1)
10145 pci_release_regions(pdev);
a2fbb9ea
ET
10146
10147err_out_disable:
10148 pci_disable_device(pdev);
10149 pci_set_drvdata(pdev, NULL);
10150
10151err_out:
10152 return rc;
10153}
10154
25047950
ET
10155static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10156{
10157 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10158
10159 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10160 return val;
10161}
10162
10163/* return value of 1=2.5GHz 2=5GHz */
10164static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10165{
10166 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10167
10168 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10169 return val;
10170}
10171
a2fbb9ea
ET
10172static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10173 const struct pci_device_id *ent)
10174{
10175 static int version_printed;
10176 struct net_device *dev = NULL;
10177 struct bnx2x *bp;
25047950 10178 int rc;
25047950 10179 DECLARE_MAC_BUF(mac);
a2fbb9ea
ET
10180
10181 if (version_printed++ == 0)
10182 printk(KERN_INFO "%s", version);
10183
10184 /* dev zeroed in init_etherdev */
10185 dev = alloc_etherdev(sizeof(*bp));
34f80b04
EG
10186 if (!dev) {
10187 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10188 return -ENOMEM;
34f80b04 10189 }
a2fbb9ea
ET
10190
10191 netif_carrier_off(dev);
10192
10193 bp = netdev_priv(dev);
10194 bp->msglevel = debug;
10195
34f80b04 10196 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
10197 if (rc < 0) {
10198 free_netdev(dev);
10199 return rc;
10200 }
10201
a2fbb9ea
ET
10202 rc = register_netdev(dev);
10203 if (rc) {
c14423fe 10204 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04 10205 goto init_one_exit;
a2fbb9ea
ET
10206 }
10207
10208 pci_set_drvdata(pdev, dev);
10209
34f80b04
EG
10210 rc = bnx2x_init_bp(bp);
10211 if (rc) {
10212 unregister_netdev(dev);
10213 goto init_one_exit;
10214 }
10215
10216 bp->common.name = board_info[ent->driver_data].name;
25047950 10217 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
34f80b04
EG
10218 " IRQ %d, ", dev->name, bp->common.name,
10219 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
10220 bnx2x_get_pcie_width(bp),
10221 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10222 dev->base_addr, bp->pdev->irq);
10223 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
a2fbb9ea 10224 return 0;
34f80b04
EG
10225
10226init_one_exit:
10227 if (bp->regview)
10228 iounmap(bp->regview);
10229
10230 if (bp->doorbells)
10231 iounmap(bp->doorbells);
10232
10233 free_netdev(dev);
10234
10235 if (atomic_read(&pdev->enable_cnt) == 1)
10236 pci_release_regions(pdev);
10237
10238 pci_disable_device(pdev);
10239 pci_set_drvdata(pdev, NULL);
10240
10241 return rc;
a2fbb9ea
ET
10242}
10243
10244static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10245{
10246 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10247 struct bnx2x *bp;
10248
10249 if (!dev) {
228241eb
ET
10250 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10251 return;
10252 }
228241eb 10253 bp = netdev_priv(dev);
a2fbb9ea 10254
a2fbb9ea
ET
10255 unregister_netdev(dev);
10256
10257 if (bp->regview)
10258 iounmap(bp->regview);
10259
10260 if (bp->doorbells)
10261 iounmap(bp->doorbells);
10262
10263 free_netdev(dev);
34f80b04
EG
10264
10265 if (atomic_read(&pdev->enable_cnt) == 1)
10266 pci_release_regions(pdev);
10267
a2fbb9ea
ET
10268 pci_disable_device(pdev);
10269 pci_set_drvdata(pdev, NULL);
10270}
10271
10272static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10273{
10274 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10275 struct bnx2x *bp;
10276
34f80b04
EG
10277 if (!dev) {
10278 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10279 return -ENODEV;
10280 }
10281 bp = netdev_priv(dev);
a2fbb9ea 10282
34f80b04 10283 rtnl_lock();
a2fbb9ea 10284
34f80b04 10285 pci_save_state(pdev);
228241eb 10286
34f80b04
EG
10287 if (!netif_running(dev)) {
10288 rtnl_unlock();
10289 return 0;
10290 }
a2fbb9ea
ET
10291
10292 netif_device_detach(dev);
a2fbb9ea 10293
da5a662a 10294 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 10295
a2fbb9ea 10296 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 10297
34f80b04
EG
10298 rtnl_unlock();
10299
a2fbb9ea
ET
10300 return 0;
10301}
10302
10303static int bnx2x_resume(struct pci_dev *pdev)
10304{
10305 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 10306 struct bnx2x *bp;
a2fbb9ea
ET
10307 int rc;
10308
228241eb
ET
10309 if (!dev) {
10310 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10311 return -ENODEV;
10312 }
228241eb 10313 bp = netdev_priv(dev);
a2fbb9ea 10314
34f80b04
EG
10315 rtnl_lock();
10316
228241eb 10317 pci_restore_state(pdev);
34f80b04
EG
10318
10319 if (!netif_running(dev)) {
10320 rtnl_unlock();
10321 return 0;
10322 }
10323
a2fbb9ea
ET
10324 bnx2x_set_power_state(bp, PCI_D0);
10325 netif_device_attach(dev);
10326
da5a662a 10327 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 10328
34f80b04
EG
10329 rtnl_unlock();
10330
10331 return rc;
a2fbb9ea
ET
10332}
10333
493adb1f
WX
10334/**
10335 * bnx2x_io_error_detected - called when PCI error is detected
10336 * @pdev: Pointer to PCI device
10337 * @state: The current pci connection state
10338 *
10339 * This function is called after a PCI bus error affecting
10340 * this device has been detected.
10341 */
10342static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10343 pci_channel_state_t state)
10344{
10345 struct net_device *dev = pci_get_drvdata(pdev);
10346 struct bnx2x *bp = netdev_priv(dev);
10347
10348 rtnl_lock();
10349
10350 netif_device_detach(dev);
10351
10352 if (netif_running(dev))
10353 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10354
10355 pci_disable_device(pdev);
10356
10357 rtnl_unlock();
10358
10359 /* Request a slot reset */
10360 return PCI_ERS_RESULT_NEED_RESET;
10361}
10362
10363/**
10364 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10365 * @pdev: Pointer to PCI device
10366 *
10367 * Restart the card from scratch, as if from a cold-boot.
10368 */
10369static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10370{
10371 struct net_device *dev = pci_get_drvdata(pdev);
10372 struct bnx2x *bp = netdev_priv(dev);
10373
10374 rtnl_lock();
10375
10376 if (pci_enable_device(pdev)) {
10377 dev_err(&pdev->dev,
10378 "Cannot re-enable PCI device after reset\n");
10379 rtnl_unlock();
10380 return PCI_ERS_RESULT_DISCONNECT;
10381 }
10382
10383 pci_set_master(pdev);
10384 pci_restore_state(pdev);
10385
10386 if (netif_running(dev))
10387 bnx2x_set_power_state(bp, PCI_D0);
10388
10389 rtnl_unlock();
10390
10391 return PCI_ERS_RESULT_RECOVERED;
10392}
10393
10394/**
10395 * bnx2x_io_resume - called when traffic can start flowing again
10396 * @pdev: Pointer to PCI device
10397 *
10398 * This callback is called when the error recovery driver tells us that
10399 * its OK to resume normal operation.
10400 */
10401static void bnx2x_io_resume(struct pci_dev *pdev)
10402{
10403 struct net_device *dev = pci_get_drvdata(pdev);
10404 struct bnx2x *bp = netdev_priv(dev);
10405
10406 rtnl_lock();
10407
10408 if (netif_running(dev))
10409 bnx2x_nic_load(bp, LOAD_OPEN);
10410
10411 netif_device_attach(dev);
10412
10413 rtnl_unlock();
10414}
10415
10416static struct pci_error_handlers bnx2x_err_handler = {
10417 .error_detected = bnx2x_io_error_detected,
10418 .slot_reset = bnx2x_io_slot_reset,
10419 .resume = bnx2x_io_resume,
10420};
10421
a2fbb9ea 10422static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
10423 .name = DRV_MODULE_NAME,
10424 .id_table = bnx2x_pci_tbl,
10425 .probe = bnx2x_init_one,
10426 .remove = __devexit_p(bnx2x_remove_one),
10427 .suspend = bnx2x_suspend,
10428 .resume = bnx2x_resume,
10429 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
10430};
10431
10432static int __init bnx2x_init(void)
10433{
10434 return pci_register_driver(&bnx2x_pci_driver);
10435}
10436
10437static void __exit bnx2x_cleanup(void)
10438{
10439 pci_unregister_driver(&bnx2x_pci_driver);
10440}
10441
10442module_init(bnx2x_init);
10443module_exit(bnx2x_cleanup);
10444