/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.6"
#define DRV_MODULE_RELDATE	"2008/06/23"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

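/* Both helpers above tunnel GRC register accesses through the PCI
 * config window: PCICFG_GRC_ADDRESS selects the target register and
 * PCICFG_GRC_DATA carries the data.  Parking the window back at
 * PCICFG_VENDOR_ID_OFFSET afterwards keeps a stray config cycle from
 * hitting a stale GRC address.
 */
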
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

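/* Each DMAE channel has its own command memory slot and GO register;
 * bnx2x_post_dmae() stages the command dword by dword into slot 'idx'
 * and then kicks that channel via dmae_reg_go_c[idx].
 */
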
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

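/* bnx2x_write_dmae() is synchronous: it busy-waits for the engine to
 * write DMAE_COMP_VAL back into the wb_comp slowpath buffer, polling
 * up to 200 times (5us per try on silicon, 100ms on emulation/FPGA)
 * before reporting a timeout.  Before the engine is set up
 * (!bp->dmae_ready) it silently falls back to indirect writes.
 */
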
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

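/* The "wb" helpers move 64-bit values as a {hi, lo} dword pair through
 * the DMAE engine.  Only the slowpath uses them, hence no inlining.
 */
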
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

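/* Each of the four storm processors (X/T/C/U) keeps an assert list in
 * its internal memory; bnx2x_mc_assert() walks all four lists and
 * returns the number of valid entries it printed.
 */
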
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = 0;
		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}

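/* The quiescing order in bnx2x_int_disable_sync() matters: intr_sem
 * makes the handlers bail out early, the HC write stops new
 * interrupts, synchronize_irq() drains handlers already in flight, and
 * only then is the slowpath work item cancelled.
 */
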
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

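/* bnx2x_ack_sb() packs the status block index, storm ID, update flag
 * and interrupt mode into a single igu_ack_register dword and posts it
 * to the per-port HC command register in one write.
 */
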
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

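/* Worked example (illustrative numbers only): with prod = 100,
 * cons = 90 and NUM_TX_RINGS = 3, used = 10 + 3 = 13, so
 * tx_ring_size - 13 BDs are reported free.  Counting the "next-page"
 * BDs as used keeps callers from filling the ring into the link
 * entries.
 */
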
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

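/* The netif_tx_lock section above closes a race with start_xmit():
 * the queue is only woken after rechecking, under the TX lock, that it
 * is still stopped, the device is open and at least MAX_SKB_FRAGS + 3
 * BDs are free.
 */
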
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

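/* Each SGE entry maps a compound page of PAGES_PER_SGE pages.  On a
 * DMA mapping failure the freshly allocated pages are released and
 * -ENOMEM is propagated, so the caller drops the packet rather than
 * posting a buffer the device cannot reach.
 */
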
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

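/* The 64-bit sge_mask words act as a completion bitmap: bits are
 * cleared as the FW consumes SGEs, and rx_sge_prod is advanced only
 * across fully-zeroed words, so the producer never overtakes a page
 * the hardware may still own.
 */
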
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	struct page *sge;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		sge = rx_pg->page;
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

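/* bnx2x_fill_frag_skb() turns the CQE's SGL into skb frags: every
 * page handed to the stack is immediately replaced in the ring by
 * bnx2x_alloc_rx_sge(), and gso_size is set so an aggregated frame
 * can still be re-segmented when forwarded.
 */
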
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		bp->dev->last_rx = jiffies;

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	u16 queue;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

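/* In INT#A mode the ack read returns a bitmask: bit 0 signals the
 * slowpath (default status block) and bit (1 + sb_id), i.e.
 * 0x2 << sb_id, signals fastpath queue 0; anything left over is
 * logged as an unknown interrupt.
 */
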
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

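/* Typical usage is a strict acquire/program/release bracket around the
 * shared register, as bnx2x_set_gpio() below demonstrates.  A minimal
 * sketch (illustrative only, not compiled in):
 */
#if 0
	if (!bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO)) {
		/* ... program the shared resource ... */
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	}
#endif
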
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

17de50b7 1791int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1792{
1793 /* The GPIO should be swapped if swap register is set and active */
1794 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1795 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1796 int gpio_shift = gpio_num +
1797 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1798 u32 gpio_mask = (1 << gpio_shift);
1799 u32 gpio_reg;
a2fbb9ea 1800
c18487ee
YR
1801 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1802 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1803 return -EINVAL;
1804 }
a2fbb9ea 1805
4a37fb66 1806 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1807 /* read GPIO and mask except the float bits */
1808 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1809
c18487ee
YR
1810 switch (mode) {
1811 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1812 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1813 gpio_num, gpio_shift);
1814 /* clear FLOAT and set CLR */
1815 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1816 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1817 break;
a2fbb9ea 1818
c18487ee
YR
1819 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1820 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1821 gpio_num, gpio_shift);
1822 /* clear FLOAT and set SET */
1823 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1824 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1825 break;
a2fbb9ea 1826
17de50b7 1827 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1828 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1829 gpio_num, gpio_shift);
1830 /* set FLOAT */
1831 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1832 break;
a2fbb9ea 1833
c18487ee
YR
1834 default:
1835 break;
a2fbb9ea
ET
1836 }
1837
c18487ee 1838 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1839 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1840
c18487ee 1841 return 0;
a2fbb9ea
ET
1842}
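
/* Worked example for the shift math above, assuming the per-port GPIO
 * fields in MISC_REG_GPIO are MISC_REGISTERS_GPIO_PORT_SHIFT == 4 bits
 * apart: gpio_num = 2 for the swapped port gives
 * gpio_shift = 2 + 4 = 6, so gpio_mask = (1 << 6) selects the other
 * port's GPIO2 bit within each of the FLOAT/SET/CLR fields.
 */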

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will
   be set to 1. */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
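
/* Worked example: with min BW fields of 0, 25, 0 and 40 percent for
 * the four vnics of a port (none hidden), the two zero entries are
 * counted as DEF_MIN_RATE each, so the function returns
 * 2*DEF_MIN_RATE + 2500 + 4000 (the fields are scaled by 100 above).
 * Only when all four fields are zero does it return 0, which the
 * callers use to disable fairness.
 */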

static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes, "
			   "fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will
		   occur. The 1.25 coefficient makes the threshold a little
		   bigger than the real time, to compensate for timer
		   inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}
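
/* Worked example (assuming RS_PERIODIC_TIMEOUT_USEC == 100 per the
 * SDM-tick comment above, and T_FAIR_COEF == 10000000 so that the
 * quoted 10G/1G values come out): for a 10G link port_rate = 10000,
 * hence r_param = 1250 bytes/usec,
 * rs_threshold = (100 * 1250 * 5) / 4 = 156250 bytes and
 * t_fair = 10000000 / 10000 = 1000 usec.
 */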

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			  ((double)m_rs_vn.
				   protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	do {
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	} while (0);
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
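
/* Worked example (again assuming RS_PERIODIC_TIMEOUT_USEC == 100):
 * a vn whose max BW field is 40 gets vn_max_rate = 4000 Mbps and a
 * rate-shaping quota of (4000 * 100) / 8 = 50000 bytes per period.
 */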

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	int vn;

	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_update(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int func;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | BP_PORT(bp));

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}
	}

	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int port = BP_PORT(bp);

		/* Init RATE SHAPING and FAIRNESS contexts */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum,
					bp->link_vars.line_speed,
					&m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, bp->link_vars.line_speed,
					&m_cmng_port);
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
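
/* Usage sketch: callers post a single slow path element and then wait
 * for its completion to show up on the fastpath ring; see
 * bnx2x_storm_stats_post() below, which passes the two dwords of its
 * ramrod data as data_hi/data_lo.
 */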

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
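
/* The returned mask records which def_sb indices advanced:
 * bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM; bnx2x_sp_task() below acts on
 * bits 0 and 1.
 */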

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC)
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shut down the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
						DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
	u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
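
/* Worked example of the edge detection above: attn_bits = 0x5,
 * attn_ack = 0x3 and attn_state = 0x3 yield
 * asserted = 0x5 & ~0x3 & ~0x3 = 0x4 (newly raised) and
 * deasserted = ~0x5 & 0x3 & 0x3 = 0x2 (previously raised, now gone).
 */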

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* CStorm events: query_stats, port delete ramrod */
	if (status & 0x2)
		bp->stats_pending = 0;

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	schedule_work(&bp->sp_task);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		/* parentheses keep the carry as an addend, not a condition */ \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
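
/* Illustrative self-check for the two macros above (not part of the
 * driver): exercises the carry path of ADD_64 and the borrow path of
 * DIFF_64 with known values.
 */
static void __maybe_unused bnx2x_stats_macros_example(void)
{
	u32 s_hi = 0, s_lo = 0xffffffff;
	u32 d_hi, d_lo;

	/* 0x0_ffffffff + 1 = 0x1_00000000 */
	ADD_64(s_hi, 0, s_lo, 1);
	WARN_ON(s_hi != 1 || s_lo != 0);

	/* 0x2_00000000 - 0x1_00000001 = 0x0_ffffffff */
	DIFF_64(d_hi, 2, 1, d_lo, 0, 1);
	WARN_ON(d_hi != 0 || d_lo != 0xffffffff);
}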

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
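
/* Note: on 32-bit builds bnx2x_hilo() intentionally returns only the
 * low dword, since a long can hold no more; 64-bit builds merge both
 * halves via HILO_U64.
 */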

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
	memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
3439
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct regpair diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	return 0;
}

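/* Sanity-check that the tstorm/xstorm per-client counters match the
 * driver's stats_counter before trusting the snapshot: each storm
 * stamps the counter of the request it answered, so a stale stamp
 * means the latest statistics ramrod has not completed yet.
 */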
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	int cl_id = BP_CL_ID(bp);
	struct tstorm_per_port_stats *tport =
				&stats->tstorm_common.port_statistics;
	struct tstorm_per_client_stats *tclient =
			&stats->tstorm_common.client_statistics[cl_id];
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct xstorm_per_client_stats *xclient =
			&stats->xstorm_common.client_statistics[cl_id];
	struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	u32 diff;

	/* are storm stats valid? */
	if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
		   " tstorm counter (%d) != stats_counter (%d)\n",
		   tclient->stats_counter, bp->stats_counter);
		return -1;
	}
	if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
		   " xstorm counter (%d) != stats_counter (%d)\n",
		   xclient->stats_counter, bp->stats_counter);
		return -2;
	}

	fstats->total_bytes_received_hi =
	fstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
	fstats->total_bytes_received_lo =
	fstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

	estats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
	estats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(fstats->total_bytes_received_hi,
	       estats->error_bytes_received_hi,
	       fstats->total_bytes_received_lo,
	       estats->error_bytes_received_lo);

	UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);

	fstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
	fstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

	UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
	estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
	estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
	estats->mac_discard = le32_to_cpu(tport->mac_discard);

	old_tclient->rcv_unicast_bytes.hi =
				le32_to_cpu(tclient->rcv_unicast_bytes.hi);
	old_tclient->rcv_unicast_bytes.lo =
				le32_to_cpu(tclient->rcv_unicast_bytes.lo);
	old_tclient->rcv_broadcast_bytes.hi =
				le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
	old_tclient->rcv_broadcast_bytes.lo =
				le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
	old_tclient->rcv_multicast_bytes.hi =
				le32_to_cpu(tclient->rcv_multicast_bytes.hi);
	old_tclient->rcv_multicast_bytes.lo =
				le32_to_cpu(tclient->rcv_multicast_bytes.lo);
	old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);

	old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
	old_tclient->packets_too_big_discard =
				le32_to_cpu(tclient->packets_too_big_discard);
	estats->no_buff_discard =
	old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
	old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);

	old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
	old_xclient->unicast_bytes_sent.hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
	old_xclient->unicast_bytes_sent.lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);
	old_xclient->multicast_bytes_sent.hi =
				le32_to_cpu(xclient->multicast_bytes_sent.hi);
	old_xclient->multicast_bytes_sent.lo =
				le32_to_cpu(xclient->multicast_bytes_sent.lo);
	old_xclient->broadcast_bytes_sent.hi =
				le32_to_cpu(xclient->broadcast_bytes_sent.hi);
	old_xclient->broadcast_bytes_sent.lo =
				le32_to_cpu(xclient->broadcast_bytes_sent.lo);

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	return 0;
}

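/* Translate the accumulated driver statistics into the generic
 * struct net_device_stats counters that the stack exposes to
 * userspace.
 */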
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = old_tclient->checksum_discard +
			     estats->mac_discard;
	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);

	nstats->collisions =
			estats->tx_stat_dot3statssinglecollisionframes_lo +
			estats->tx_stat_dot3statsmultiplecollisionframes_lo +
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;

	estats->jabber_packets_received =
				old_tclient->packets_too_big_discard +
				estats->rx_stat_dot3statsframestoolong_lo;

	nstats->rx_length_errors =
				estats->rx_stat_etherstatsundersizepkts_lo +
				estats->jabber_packets_received;
	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;
	nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors;
}

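/* Timer-driven statistics update: once the DMAE completion value is
 * seen, fold in the HW and storm snapshots, refresh the netdev
 * counters, and post the next pair of requests.  If a posted request
 * stays unanswered for 3 ticks, the storms are assumed to be stuck
 * and bnx2x_panic() is called.
 */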
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int update = 0;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update)
		bnx2x_net_stats_update(bp);

	else {
		if (bp->stats_pending) {
			bp->stats_pending++;
			if (bp->stats_pending == 3) {
				BNX2X_ERR("stats not updated for 3 times\n");
				bnx2x_panic();
				return;
			}
		}
	}

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
		       estats->driver_xoff, estats->brb_drop_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %u  no_buff_discard %u "
			"mac_discard %u  mac_filter_discard %u "
			"xxoverflow_discard %u  brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       old_tclient->checksum_discard,
		       old_tclient->packets_too_big_discard,
		       old_tclient->no_buff_discard, estats->mac_discard,
		       estats->mac_filter_discard, estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       old_tclient->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

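/* Statistics state machine: indexed by the current state and the
 * incoming event, each entry names the action to run and the next
 * state.  Rows are STATS_STATE_DISABLED/ENABLED; columns follow the
 * STATS_EVENT_* order (PMF, LINK_UP, UPDATE, STOP).
 */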
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

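/* Periodic driver timer: in poll mode it services fastpath[0]
 * directly; with a management CPU present it maintains the
 * driver<->MCP heartbeat pulse, and while the device is up it feeds
 * the statistics state machine with STATS_EVENT_UPDATE.
 */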
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

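/* Point the USTORM and CSTORM halves of a per-queue host status block
 * at their DMA sections, tag them with the owning function, start
 * with host coalescing disabled on every index, and re-enable the IGU
 * interrupt for this status block.
 */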
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

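/* The default status block carries the attention bits plus one index
 * section per storm (U/C/T/X).  Read the attention groups from the
 * AEU enable registers, then register each storm section with its
 * storm, mirroring what bnx2x_init_sb() does for the fastpath blocks.
 */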
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

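/* Program the per-queue host-coalescing timeouts.  rx_ticks/tx_ticks
 * are kept in microseconds; the division by 12 suggests the HC
 * timeout fields count in 12us hardware units.  A value of 0 disables
 * coalescing on that index via the *_HC_DISABLE writes.
 */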
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_BD_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

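/* Bring up the RX side of every queue: pre-allocate the TPA
 * aggregation skb pool (falling back to TPA-disabled on failure),
 * chain the "next page" elements of the SGE, BD and CQE rings, fill
 * the rings, and publish the initial producers to the chip.
 */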
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_use_size = bp->dev->mtu;
	bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
	bp->rx_buf_size = bp->rx_buf_use_size + 64;

	if (bp->flags & TPA_ENABLE_FLAG) {
		DP(NETIF_MSG_IFUP,
		   "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
		   bp->rx_buf_use_size, bp->rx_buf_size,
		   bp->dev->mtu + ETH_OVREHEAD);

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				bp->eth_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * This will generate an interrupt (to the TSTORM), so it
		 * must only be done after the chip is initialized.
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

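/* Fill in the per-connection Ethernet context of every queue: the
 * XSTORM part gets the TX BD ring and doorbell addresses, the USTORM
 * part the RX BD/SGE rings and buffer sizes (plus the TPA flags when
 * enabled), and the CDU reserved fields encode the HW connection ID
 * and connection type.
 */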
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_size = 64;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_use_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
					(u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
			i % bp->num_queues);

	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

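/* Push the current RX filtering mode to the TSTORM MAC filter
 * configuration.  'mask' is this function's bit (1 << BP_L_ID), so
 * only this function's drop-all/accept-all bits are touched in the
 * shared structure.
 */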
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* reset xstorm per client statistics */
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}
	/* reset tstorm per client statistics */
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size */
	max_agg_size = min((u32)(bp->rx_buf_use_size +
				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
			   (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
}

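/* Note the deliberate fall-through below: a COMMON load also performs
 * the PORT and FUNCTION initialization, and a PORT load also performs
 * the FUNCTION part (hence the "no break" markers).
 */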
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      FP_SB_ID(fp));
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

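/* The firmware blobs handed to bnx2x_gunzip() are gzip-compressed.
 * bnx2x_gunzip_init() sets up a DMA-able output buffer and a zlib
 * inflate stream that are reused for each blob and released again by
 * bnx2x_gunzip_end().
 */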
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME	0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* Some of the internal memories are not directly readable from the
 * driver; to test them we send debug packets.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do I reset the NIG statistics? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

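/* Unmask the attention interrupt sources of the HW blocks (writing 0
 * leaves all bits unmasked).  The commented-out writes and the PBF
 * value below deliberately keep a few sources masked.
 */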
5054static void enable_blocks_attention(struct bnx2x *bp)
5055{
5056 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5057 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5058 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5059 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5060 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5061 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5062 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5063 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5064 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5065/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5066/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5067 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5068 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5069 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5070/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5071/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5072 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5073 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5074 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5075 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5076/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5077/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5078 if (CHIP_REV_IS_FPGA(bp))
5079 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5080 else
5081 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5082 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5083 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5084 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5085/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5086/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5087 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5088 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5089/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5090 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5091}
5092
5093
5094static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5095{
a2fbb9ea 5096 u32 val, i;
a2fbb9ea 5097
34f80b04 5098 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5099
5100 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5101 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5102
5103 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5104 if (CHIP_IS_E1H(bp))
5105 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5106
5107 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5108 msleep(30);
5109 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5110
5111 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5112 if (CHIP_IS_E1(bp)) {
5113 /* enable HW interrupt from PXP on USDM overflow
5114 bit 16 on INT_MASK_0 */
5115 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5116 }
a2fbb9ea 5117
5118 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5119 bnx2x_init_pxp(bp);
5120
5121#ifdef __BIG_ENDIAN
5122 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5123 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5124 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5125 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5126 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5127 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5128
5129/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5130 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5131 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5132 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5133 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5134#endif
5135
5136#ifndef BCM_ISCSI
5137 /* set NIC mode */
5138 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5139#endif
5140
34f80b04 5141 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5142#ifdef BCM_ISCSI
5143 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5144 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5145 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5146#endif
5147
5148 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5149 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5150
5151 /* let the HW do its magic ... */
5152 msleep(100);
5153 /* finish PXP init */
5154 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5155 if (val != 1) {
5156 BNX2X_ERR("PXP2 CFG failed\n");
5157 return -EBUSY;
5158 }
5159 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5160 if (val != 1) {
5161 BNX2X_ERR("PXP2 RD_INIT failed\n");
5162 return -EBUSY;
5163 }
a2fbb9ea 5164
5165 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5166 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5167
34f80b04 5168 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5169
5170 /* clean the DMAE memory */
5171 bp->dmae_ready = 1;
5172 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5173
5174 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5175 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5176 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5177 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5178
5179 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5180 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5181 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5182 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5183
5184 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5185 /* soft reset pulse */
5186 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5187 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5188
5189#ifdef BCM_ISCSI
34f80b04 5190 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5191#endif
a2fbb9ea 5192
5193 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5194 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5195 if (!CHIP_REV_IS_SLOW(bp)) {
5196 /* enable hw interrupt from doorbell Q */
5197 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5198 }
a2fbb9ea 5199
5200 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5201 if (CHIP_REV_IS_SLOW(bp)) {
5202 /* fix for emulation and FPGA for no pause */
5203 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5204 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5205 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5206 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5207 }
a2fbb9ea 5208
5209 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5210 if (CHIP_IS_E1H(bp))
5211 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5212
5213 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5214 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5215 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5216 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5217
5218 if (CHIP_IS_E1H(bp)) {
5219 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5220 STORM_INTMEM_SIZE_E1H/2);
5221 bnx2x_init_fill(bp,
5222 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5223 0, STORM_INTMEM_SIZE_E1H/2);
5224 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5225 STORM_INTMEM_SIZE_E1H/2);
5226 bnx2x_init_fill(bp,
5227 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5228 0, STORM_INTMEM_SIZE_E1H/2);
5229 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5230 STORM_INTMEM_SIZE_E1H/2);
5231 bnx2x_init_fill(bp,
5232 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5233 0, STORM_INTMEM_SIZE_E1H/2);
5234 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5235 STORM_INTMEM_SIZE_E1H/2);
5236 bnx2x_init_fill(bp,
5237 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5238 0, STORM_INTMEM_SIZE_E1H/2);
5239 } else { /* E1 */
5240 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5241 STORM_INTMEM_SIZE_E1);
5242 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5243 STORM_INTMEM_SIZE_E1);
5244 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5245 STORM_INTMEM_SIZE_E1);
5246 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5247 STORM_INTMEM_SIZE_E1);
34f80b04 5248 }
a2fbb9ea 5249
5250 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5251 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5252 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5253 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5254
5255 /* sync semi rtc */
5256 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5257 0x80000000);
5258 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5259 0x80000000);
a2fbb9ea 5260
5261 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5262 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5263 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5264
5265 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5266 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5267 REG_WR(bp, i, 0xc0cac01a);
5268 /* TODO: replace with something meaningful */
5269 }
5270 if (CHIP_IS_E1H(bp))
5271 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5272 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5273
5274 if (sizeof(union cdu_context) != 1024)
5275 /* we currently assume that a context is 1024 bytes */
5276 printk(KERN_ALERT PFX "please adjust the size of"
5277 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5278
5279 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5280 val = (4 << 24) + (0 << 12) + 1024;
5281 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5282 if (CHIP_IS_E1(bp)) {
5283 /* !!! fix pxp client credit until excel update */
5284 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5285 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5286 }
a2fbb9ea 5287
5288 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5289 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
a2fbb9ea 5290
5291 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5292 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5293
5294 /* PXPCS COMMON comes here */
5295 /* Reset PCIE errors for debug */
5296 REG_WR(bp, 0x2814, 0xffffffff);
5297 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5298
5299 /* EMAC0 COMMON comes here */
5300 /* EMAC1 COMMON comes here */
5301 /* DBU COMMON comes here */
5302 /* DBG COMMON comes here */
5303
5304 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5305 if (CHIP_IS_E1H(bp)) {
5306 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5307 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5308 }
5309
5310 if (CHIP_REV_IS_SLOW(bp))
5311 msleep(200);
5312
5313 /* finish CFC init */
5314 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5315 if (val != 1) {
5316 BNX2X_ERR("CFC LL_INIT failed\n");
5317 return -EBUSY;
5318 }
5319 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5320 if (val != 1) {
5321 BNX2X_ERR("CFC AC_INIT failed\n");
5322 return -EBUSY;
5323 }
5324 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5325 if (val != 1) {
5326 BNX2X_ERR("CFC CAM_INIT failed\n");
5327 return -EBUSY;
5328 }
5329 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5330
5331 /* read NIG statistic
5332 to see if this is our first up since powerup */
5333 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5334 val = *bnx2x_sp(bp, wb_data[0]);
5335
5336 /* do internal memory self test */
5337 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5338 BNX2X_ERR("internal mem self test failed\n");
5339 return -EBUSY;
5340 }
5341
5342 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5343 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5344 /* Fan failure is indicated by SPIO 5 */
5345 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5346 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5347
5348 /* set to active low mode */
5349 val = REG_RD(bp, MISC_REG_SPIO_INT);
5350 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5351 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5352 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5353
5354 /* enable interrupt to signal the IGU */
5355 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5356 val |= (1 << MISC_REGISTERS_SPIO_5);
5357 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5358 break;
f1410647 5359
5360 default:
5361 break;
5362 }
f1410647 5363
5364 /* clear PXP2 attentions */
5365 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5366
34f80b04 5367 enable_blocks_attention(bp);
a2fbb9ea 5368
5369 if (bp->flags & TPA_ENABLE_FLAG) {
5370 struct tstorm_eth_tpa_exist tmp = {0};
5371
5372 tmp.tpa_exist = 1;
5373
5374 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5375 ((u32 *)&tmp)[0]);
5376 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5377 ((u32 *)&tmp)[1]);
5378 }
5379
5380 return 0;
5381}
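
The TPA block at the end of the function above pushes an 8-byte firmware flag (struct tstorm_eth_tpa_exist) into TSTORM internal memory as two consecutive 32-bit register writes. A standalone sketch of the same idea; the struct layout, offsets, and reg_wr() below are hypothetical stand-ins, and memcpy replaces the driver's direct (u32 *) cast to sidestep strict aliasing:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fw_flag {                /* hypothetical 8-byte firmware structure */
        uint32_t exist;
        uint32_t reserved;
};

static void reg_wr(uint32_t off, uint32_t val)  /* stand-in for REG_WR */
{
        printf("write 0x%08x -> offset 0x%x\n", val, off);
}

int main(void)
{
        struct fw_flag tmp = { .exist = 1 };
        uint32_t words[2];

        memcpy(words, &tmp, sizeof(words));     /* view struct as two u32s */
        reg_wr(0x0, words[0]);                  /* low word */
        reg_wr(0x4, words[1]);                  /* high word */
        return 0;
}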
a2fbb9ea 5382
5383static int bnx2x_init_port(struct bnx2x *bp)
5384{
5385 int port = BP_PORT(bp);
5386 u32 val;
a2fbb9ea 5387
5388 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5389
5390 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5391
5392 /* Port PXP comes here */
5393 /* Port PXP2 comes here */
5394#ifdef BCM_ISCSI
5395 /* Port0 1
5396 * Port1 385 */
5397 i++;
5398 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5399 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5400 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5401 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5402
5403 /* Port0 2
5404 * Port1 386 */
5405 i++;
5406 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5407 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5408 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5409 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5410
5411 /* Port0 3
5412 * Port1 387 */
5413 i++;
5414 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5415 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5416 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5417 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5418#endif
34f80b04 5419 /* Port CMs come here */
5420
5421 /* Port QM comes here */
5422#ifdef BCM_ISCSI
5423 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5424 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5425
5426 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5427 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5428#endif
5429 /* Port DQ comes here */
5430 /* Port BRB1 comes here */
ad8d3948 5431 /* Port PRS comes here */
5432 /* Port TSDM comes here */
5433 /* Port CSDM comes here */
5434 /* Port USDM comes here */
5435 /* Port XSDM comes here */
5436 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5437 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5438 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5439 port ? USEM_PORT1_END : USEM_PORT0_END);
5440 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5441 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5442 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5443 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5444 /* Port UPB comes here */
5445 /* Port XPB comes here */
5446
5447 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5448 port ? PBF_PORT1_END : PBF_PORT0_END);
5449
5450 /* configure PBF to work without PAUSE (MTU 9000) */
34f80b04 5451 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5452
5453 /* update threshold */
34f80b04 5454 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5455 /* update init credit */
34f80b04 5456 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5457
5458 /* probe changes */
34f80b04 5459 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5460 msleep(5);
34f80b04 5461 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5462
5463#ifdef BCM_ISCSI
5464 /* tell the searcher where the T2 table is */
5465 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5466
5467 wb_write[0] = U64_LO(bp->t2_mapping);
5468 wb_write[1] = U64_HI(bp->t2_mapping);
5469 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5470 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5471 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5472 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5473
5474 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5475 /* Port SRCH comes here */
5476#endif
5477 /* Port CDU comes here */
5478 /* Port CFC comes here */
5479
5480 if (CHIP_IS_E1(bp)) {
5481 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5482 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5483 }
5484 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5485 port ? HC_PORT1_END : HC_PORT0_END);
5486
5487 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5488 MISC_AEU_PORT0_START,
5489 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5490 /* init aeu_mask_attn_func_0/1:
5491 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5492 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5493 * bits 4-7 are used for "per vn group attention" */
5494 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5495 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5496
5497 /* Port PXPCS comes here */
5498 /* Port EMAC0 comes here */
5499 /* Port EMAC1 comes here */
5500 /* Port DBU comes here */
5501 /* Port DBG comes here */
5502 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5503 port ? NIG_PORT1_END : NIG_PORT0_END);
5504
5505 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5506
5507 if (CHIP_IS_E1H(bp)) {
5508 u32 wsum;
5509 struct cmng_struct_per_port m_cmng_port;
5510 int vn;
5511
5512 /* 0x2 disable e1hov, 0x1 enable */
5513 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5514 (IS_E1HMF(bp) ? 0x1 : 0x2));
5515
5516 /* Init RATE SHAPING and FAIRNESS contexts.
5517 Initialize as if there is a 10G link. */
5518 wsum = bnx2x_calc_vn_wsum(bp);
5519 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5520 if (IS_E1HMF(bp))
5521 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5522 bnx2x_init_vn_minmax(bp, 2*vn + port,
5523 wsum, 10000, &m_cmng_port);
5524 }
5525
5526 /* Port MCP comes here */
5527 /* Port DMAE comes here */
5528
34f80b04 5529 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5530 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5531 /* add SPIO 5 to group 0 */
5532 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5533 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5534 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5535 break;
5536
5537 default:
5538 break;
5539 }
5540
c18487ee 5541 bnx2x__link_reset(bp);
a2fbb9ea 5542
5543 return 0;
5544}
5545
5546#define ILT_PER_FUNC (768/2)
5547#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5548/* the phys address is shifted right 12 bits and has a valid bit (1)
5549 added at the 53rd bit;
5550 since this is a wide register(TM)
5551 we split it into two 32-bit writes
5552 */
5553#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5554#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5555#define PXP_ONE_ILT(x) (((x) << 10) | x)
5556#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5557
5558#define CNIC_ILT_LINES 0
5559
5560static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5561{
5562 int reg;
5563
5564 if (CHIP_IS_E1H(bp))
5565 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5566 else /* E1 */
5567 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5568
5569 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5570}
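
A worked example of the ONCHIP_ADDR1/ONCHIP_ADDR2 split used by bnx2x_ilt_wr(): the low word carries physical-address bits 12-43 and the high word carries the valid bit (bit 20 of the high word) plus the remaining top address bits. The DMA address below is hypothetical, chosen 4KB-aligned:

#include <stdint.h>
#include <stdio.h>

#define ONCHIP_ADDR1(x) ((uint32_t)(((uint64_t)(x) >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)(x) >> 44)))

int main(void)
{
        uint64_t addr = 0x0000123456789000ULL;  /* hypothetical DMA address */

        printf("ADDR1 = 0x%08x\n", ONCHIP_ADDR1(addr)); /* 0x23456789 */
        printf("ADDR2 = 0x%08x\n", ONCHIP_ADDR2(addr)); /* 0x00100001 */
        return 0;
}

The two halves are then written back-to-back, which is why the comment above calls the target a wide register.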
5571
5572static int bnx2x_init_func(struct bnx2x *bp)
5573{
5574 int port = BP_PORT(bp);
5575 int func = BP_FUNC(bp);
5576 int i;
5577
5578 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5579
5580 i = FUNC_ILT_BASE(func);
5581
5582 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5583 if (CHIP_IS_E1H(bp)) {
5584 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5585 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5586 } else /* E1 */
5587 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5588 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5589
5590
5591 if (CHIP_IS_E1H(bp)) {
5592 for (i = 0; i < 9; i++)
5593 bnx2x_init_block(bp,
5594 cm_start[func][i], cm_end[func][i]);
5595
5596 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5597 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5598 }
5599
5600 /* HC init per function */
5601 if (CHIP_IS_E1H(bp)) {
5602 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5603
5604 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5605 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5606 }
5607 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5608
5609 if (CHIP_IS_E1H(bp))
5610 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5611
c14423fe 5612 /* Reset PCIE errors for debug */
5613 REG_WR(bp, 0x2114, 0xffffffff);
5614 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5615
5616 return 0;
5617}
5618
5619static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5620{
5621 int i, rc = 0;
a2fbb9ea 5622
5623 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5624 BP_FUNC(bp), load_code);
a2fbb9ea 5625
5626 bp->dmae_ready = 0;
5627 mutex_init(&bp->dmae_mutex);
5628 bnx2x_gunzip_init(bp);
a2fbb9ea 5629
5630 switch (load_code) {
5631 case FW_MSG_CODE_DRV_LOAD_COMMON:
5632 rc = bnx2x_init_common(bp);
5633 if (rc)
5634 goto init_hw_err;
5635 /* no break */
5636
5637 case FW_MSG_CODE_DRV_LOAD_PORT:
5638 bp->dmae_ready = 1;
5639 rc = bnx2x_init_port(bp);
5640 if (rc)
5641 goto init_hw_err;
5642 /* no break */
5643
5644 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5645 bp->dmae_ready = 1;
5646 rc = bnx2x_init_func(bp);
5647 if (rc)
5648 goto init_hw_err;
5649 break;
5650
5651 default:
5652 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5653 break;
5654 }
5655
5656 if (!BP_NOMCP(bp)) {
5657 int func = BP_FUNC(bp);
5658
5659 bp->fw_drv_pulse_wr_seq =
34f80b04 5660 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5661 DRV_PULSE_SEQ_MASK);
5662 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5663 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5664 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5665 } else
5666 bp->func_stx = 0;
a2fbb9ea 5667
5668 /* this needs to be done before gunzip end */
5669 bnx2x_zero_def_sb(bp);
5670 for_each_queue(bp, i)
5671 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5672
5673init_hw_err:
5674 bnx2x_gunzip_end(bp);
5675
5676 return rc;
5677}
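
The switch in bnx2x_init_hw() above relies on deliberate fallthrough (the /* no break */ markers): a COMMON load runs common, port, and function init; a PORT load runs port and function init; a FUNCTION load runs function init only. A compact sketch of the cascade with hypothetical stage bodies:

#include <stdio.h>

enum load_code { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

static int init_hw(enum load_code code)
{
        switch (code) {
        case LOAD_COMMON:
                printf("init common (once per chip)\n");
                /* fall through */
        case LOAD_PORT:
                printf("init port (once per port)\n");
                /* fall through */
        case LOAD_FUNCTION:
                printf("init function (every load)\n");
                break;
        }
        return 0;
}

int main(void)
{
        return init_hw(LOAD_PORT);      /* prints the port + function stages */
}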
5678
c14423fe 5679/* send the MCP a request, block until there is a reply */
5680static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5681{
34f80b04 5682 int func = BP_FUNC(bp);
5683 u32 seq = ++bp->fw_seq;
5684 u32 rc = 0;
5685 u32 cnt = 1;
5686 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5687
34f80b04 5688 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5689 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5690
5691 do {
5692 /* let the FW do its magic ... */
5693 msleep(delay);
a2fbb9ea 5694
19680c48 5695 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5696
5697 /* Give the FW up to 2 seconds (200*10ms) */
5698 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5699
5700 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5701 cnt*delay, rc, seq);
5702
5703 /* is this a reply to our command? */
5704 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5705 rc &= FW_MSG_CODE_MASK;
f1410647 5706
5707 } else {
5708 /* FW BUG! */
5709 BNX2X_ERR("FW failed to respond!\n");
5710 bnx2x_fw_dump(bp);
5711 rc = 0;
5712 }
f1410647 5713
5714 return rc;
5715}
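
bnx2x_fw_command() is a classic shared-mailbox handshake: the driver tags each request with an incrementing sequence number in the header's low bits, then polls the firmware's reply register until that sequence echoes back or roughly two seconds elapse. A sketch of just the matching logic; the mask values and the I/O callbacks are hypothetical stand-ins for FW_MSG_*_MASK and SHMEM_WR/SHMEM_RD:

#include <stdint.h>

#define SEQ_MASK  0x0000ffffu   /* assumed shape of FW_MSG_SEQ_NUMBER_MASK */
#define CODE_MASK 0xffff0000u   /* assumed shape of FW_MSG_CODE_MASK */

static uint32_t mcp_command(uint32_t command, uint32_t *seq_counter,
                            uint32_t (*mb_read)(void),
                            void (*mb_write)(uint32_t),
                            void (*sleep_ms)(int))
{
        uint32_t seq = ++(*seq_counter) & SEQ_MASK;
        uint32_t rc;
        int cnt = 200;                  /* ~2s at 10ms per poll */

        mb_write(command | seq);        /* request carries the sequence tag */
        do {
                sleep_ms(10);
                rc = mb_read();
        } while (((rc & SEQ_MASK) != seq) && --cnt);

        if ((rc & SEQ_MASK) != seq)
                return 0;               /* no reply: caller treats 0 as failure */
        return rc & CODE_MASK;          /* the reply code for this request */
}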
5716
5717static void bnx2x_free_mem(struct bnx2x *bp)
5718{
5719
5720#define BNX2X_PCI_FREE(x, y, size) \
5721 do { \
5722 if (x) { \
5723 pci_free_consistent(bp->pdev, size, x, y); \
5724 x = NULL; \
5725 y = 0; \
5726 } \
5727 } while (0)
5728
5729#define BNX2X_FREE(x) \
5730 do { \
5731 if (x) { \
5732 vfree(x); \
5733 x = NULL; \
5734 } \
5735 } while (0)
5736
5737 int i;
5738
5739 /* fastpath */
5740 for_each_queue(bp, i) {
5741
5742 /* Status blocks */
5743 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5744 bnx2x_fp(bp, i, status_blk_mapping),
5745 sizeof(struct host_status_block) +
5746 sizeof(struct eth_tx_db_data));
5747
5748 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5749 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5750 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5751 bnx2x_fp(bp, i, tx_desc_mapping),
5752 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5753
5754 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5755 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5756 bnx2x_fp(bp, i, rx_desc_mapping),
5757 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5758
5759 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5760 bnx2x_fp(bp, i, rx_comp_mapping),
5761 sizeof(struct eth_fast_path_rx_cqe) *
5762 NUM_RCQ_BD);
a2fbb9ea 5763
7a9b2557 5764 /* SGE ring */
32626230 5765 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5766 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5767 bnx2x_fp(bp, i, rx_sge_mapping),
5768 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5769 }
5770 /* end of fastpath */
5771
5772 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5773 sizeof(struct host_def_status_block));
5774
5775 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5776 sizeof(struct bnx2x_slowpath));
5777
5778#ifdef BCM_ISCSI
5779 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5780 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5781 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5782 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5783#endif
7a9b2557 5784 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5785
5786#undef BNX2X_PCI_FREE
5787#undef BNX2X_FREE
5788}
5789
5790static int bnx2x_alloc_mem(struct bnx2x *bp)
5791{
5792
5793#define BNX2X_PCI_ALLOC(x, y, size) \
5794 do { \
5795 x = pci_alloc_consistent(bp->pdev, size, y); \
5796 if (x == NULL) \
5797 goto alloc_mem_err; \
5798 memset(x, 0, size); \
5799 } while (0)
5800
5801#define BNX2X_ALLOC(x, size) \
5802 do { \
5803 x = vmalloc(size); \
5804 if (x == NULL) \
5805 goto alloc_mem_err; \
5806 memset(x, 0, size); \
5807 } while (0)
5808
5809 int i;
5810
5811 /* fastpath */
5812 for_each_queue(bp, i) {
5813 bnx2x_fp(bp, i, bp) = bp;
5814
5815 /* Status blocks */
5816 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5817 &bnx2x_fp(bp, i, status_blk_mapping),
5818 sizeof(struct host_status_block) +
5819 sizeof(struct eth_tx_db_data));
5820
5821 bnx2x_fp(bp, i, hw_tx_prods) =
5822 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5823
5824 bnx2x_fp(bp, i, tx_prods_mapping) =
5825 bnx2x_fp(bp, i, status_blk_mapping) +
5826 sizeof(struct host_status_block);
5827
5828 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5829 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5830 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5831 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5832 &bnx2x_fp(bp, i, tx_desc_mapping),
5833 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5834
5835 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5836 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5837 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5838 &bnx2x_fp(bp, i, rx_desc_mapping),
5839 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5840
5841 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5842 &bnx2x_fp(bp, i, rx_comp_mapping),
5843 sizeof(struct eth_fast_path_rx_cqe) *
5844 NUM_RCQ_BD);
5845
5846 /* SGE ring */
5847 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5848 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5849 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5850 &bnx2x_fp(bp, i, rx_sge_mapping),
5851 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5852 }
5853 /* end of fastpath */
5854
5855 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5856 sizeof(struct host_def_status_block));
5857
5858 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5859 sizeof(struct bnx2x_slowpath));
5860
5861#ifdef BCM_ISCSI
5862 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5863
5864 /* Initialize T1 */
5865 for (i = 0; i < 64*1024; i += 64) {
5866 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5867 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5868 }
5869
5870 /* allocate searcher T2 table
5871 we allocate 1/4 of alloc num for T2
5872 (which is not entered into the ILT) */
5873 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5874
5875 /* Initialize T2 */
5876 for (i = 0; i < 16*1024; i += 64)
5877 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5878
c14423fe 5879 /* now fixup the last line in the block to point to the next block */
5880 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5881
5882 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5883 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5884
5885 /* QM queues (128*MAX_CONN) */
5886 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5887#endif
5888
5889 /* Slow path ring */
5890 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5891
5892 return 0;
5893
5894alloc_mem_err:
5895 bnx2x_free_mem(bp);
5896 return -ENOMEM;
5897
5898#undef BNX2X_PCI_ALLOC
5899#undef BNX2X_ALLOC
5900}
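
bnx2x_alloc_mem()/bnx2x_free_mem() follow the common allocate-or-unwind idiom: every allocation macro jumps to a single error label on failure, and the error path simply calls the free routine, whose macros tolerate never-allocated (NULL) pointers. A self-contained userspace sketch of the same shape, with hypothetical fields and sizes:

#include <stdlib.h>
#include <string.h>

struct rings {          /* hypothetical driver-private state */
        void *tx;
        void *rx;
};

#define ALLOC(x, size)                          \
        do {                                    \
                (x) = malloc(size);             \
                if ((x) == NULL)                \
                        goto alloc_err;         \
                memset((x), 0, (size));         \
        } while (0)

static int rings_alloc(struct rings *r)
{
        memset(r, 0, sizeof(*r));       /* so the unwind can free(NULL) safely */
        ALLOC(r->tx, 4096);
        ALLOC(r->rx, 4096);
        return 0;

alloc_err:
        free(r->tx);    /* free(NULL) is a no-op: partial unwind is safe */
        free(r->rx);
        return -1;
}

int main(void)
{
        struct rings r;

        return rings_alloc(&r);
}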
5901
5902static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5903{
5904 int i;
5905
5906 for_each_queue(bp, i) {
5907 struct bnx2x_fastpath *fp = &bp->fp[i];
5908
5909 u16 bd_cons = fp->tx_bd_cons;
5910 u16 sw_prod = fp->tx_pkt_prod;
5911 u16 sw_cons = fp->tx_pkt_cons;
5912
5913 while (sw_cons != sw_prod) {
5914 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5915 sw_cons++;
5916 }
5917 }
5918}
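
The drain loop above compares 16-bit packet indices (sw_cons != sw_prod); because both are u16, the walk stays correct even after the producer wraps past 0xffff. A standalone illustration with hypothetical index values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t sw_prod = 3;           /* producer has wrapped past 0xffff */
        uint16_t sw_cons = 0xfffe;      /* consumer is still behind the wrap */
        int freed = 0;

        while (sw_cons != sw_prod) {    /* same test as the drain loop */
                sw_cons++;              /* u16 arithmetic wraps 0xffff -> 0 */
                freed++;
        }
        printf("freed %d packets\n", freed);    /* 5: fffe,ffff,0,1,2 */
        return 0;
}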
5919
5920static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5921{
5922 int i, j;
5923
5924 for_each_queue(bp, j) {
5925 struct bnx2x_fastpath *fp = &bp->fp[j];
5926
5927 for (i = 0; i < NUM_RX_BD; i++) {
5928 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5929 struct sk_buff *skb = rx_buf->skb;
5930
5931 if (skb == NULL)
5932 continue;
5933
5934 pci_unmap_single(bp->pdev,
5935 pci_unmap_addr(rx_buf, mapping),
5936 bp->rx_buf_use_size,
5937 PCI_DMA_FROMDEVICE);
5938
5939 rx_buf->skb = NULL;
5940 dev_kfree_skb(skb);
5941 }
7a9b2557 5942 if (!fp->disable_tpa)
5943 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5944 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 5945 ETH_MAX_AGGREGATION_QUEUES_E1H);
5946 }
5947}
5948
5949static void bnx2x_free_skbs(struct bnx2x *bp)
5950{
5951 bnx2x_free_tx_skbs(bp);
5952 bnx2x_free_rx_skbs(bp);
5953}
5954
5955static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5956{
34f80b04 5957 int i, offset = 1;
5958
5959 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 5960 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5961 bp->msix_table[0].vector);
5962
5963 for_each_queue(bp, i) {
c14423fe 5964 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 5965 "state %x\n", i, bp->msix_table[i + offset].vector,
5966 bnx2x_fp(bp, i, state));
5967
5968 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5969 BNX2X_ERR("IRQ of fp #%d being freed while "
5970 "state != closed\n", i);
a2fbb9ea 5971
34f80b04 5972 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 5973 }
5974}
5975
5976static void bnx2x_free_irq(struct bnx2x *bp)
5977{
a2fbb9ea 5978 if (bp->flags & USING_MSIX_FLAG) {
5979 bnx2x_free_msix_irqs(bp);
5980 pci_disable_msix(bp->pdev);
5981 bp->flags &= ~USING_MSIX_FLAG;
5982
5983 } else
5984 free_irq(bp->pdev->irq, bp->dev);
5985}
5986
5987static int bnx2x_enable_msix(struct bnx2x *bp)
5988{
34f80b04 5989 int i, rc, offset;
5990
5991 bp->msix_table[0].entry = 0;
5992 offset = 1;
5993 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 5994
5995 for_each_queue(bp, i) {
5996 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 5997
5998 bp->msix_table[i + offset].entry = igu_vec;
5999 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6000 "(fastpath #%u)\n", i + offset, igu_vec, i);
6001 }
6002
6003 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6004 bp->num_queues + offset);
6005 if (rc) {
6006 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6007 return -1;
6008 }
6009 bp->flags |= USING_MSIX_FLAG;
6010
6011 return 0;
6012}
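
bnx2x_enable_msix() above fills the vector table with entry 0 for the slowpath and entries 1..N for the fastpath queues, whose IGU vector numbers are offset by the function's logical ID (BP_L_ID). A sketch of the table layout only; the struct and the numeric values are hypothetical stand-ins for the kernel's struct msix_entry:

#include <stdio.h>

struct msix_entry_s {           /* stand-in for struct msix_entry */
        unsigned int entry;
};

int main(void)
{
        enum { NUM_QUEUES = 4 };
        struct msix_entry_s table[1 + NUM_QUEUES];
        int base_id = 8, i;     /* hypothetical BP_L_ID value */

        table[0].entry = 0;     /* vector 0: slowpath status block */
        for (i = 0; i < NUM_QUEUES; i++)
                table[1 + i].entry = 1 + i + base_id;   /* per-queue IGU vector */

        for (i = 0; i < 1 + NUM_QUEUES; i++)
                printf("msix_table[%d].entry = %u\n", i, table[i].entry);
        return 0;
}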
6013
6014static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6015{
34f80b04 6016 int i, rc, offset = 1;
a2fbb9ea 6017
6018 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6019 bp->dev->name, bp->dev);
6020 if (rc) {
6021 BNX2X_ERR("request sp irq failed\n");
6022 return -EBUSY;
6023 }
6024
6025 for_each_queue(bp, i) {
34f80b04 6026 rc = request_irq(bp->msix_table[i + offset].vector,
6027 bnx2x_msix_fp_int, 0,
6028 bp->dev->name, &bp->fp[i]);
a2fbb9ea 6029 if (rc) {
6030 BNX2X_ERR("request fp #%d irq failed rc %d\n",
6031 i + offset, rc);
6032 bnx2x_free_msix_irqs(bp);
6033 return -EBUSY;
6034 }
6035
6036 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6037 }
6038
6039 return 0;
6040}
6041
6042static int bnx2x_req_irq(struct bnx2x *bp)
6043{
34f80b04 6044 int rc;
a2fbb9ea 6045
6046 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6047 bp->dev->name, bp->dev);
6048 if (!rc)
6049 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6050
6051 return rc;
6052}
6053
6054/*
6055 * Init service functions
6056 */
6057
3101c2bc 6058static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6059{
6060 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6061 int port = BP_PORT(bp);
6062
6063 /* CAM allocation
6064 * unicasts 0-31:port0 32-63:port1
6065 * multicast 64-127:port0 128-191:port1
6066 */
6067 config->hdr.length_6b = 2;
6068 config->hdr.offset = port ? 31 : 0;
6069 config->hdr.client_id = BP_CL_ID(bp);
6070 config->hdr.reserved1 = 0;
6071
6072 /* primary MAC */
6073 config->config_table[0].cam_entry.msb_mac_addr =
6074 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6075 config->config_table[0].cam_entry.middle_mac_addr =
6076 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6077 config->config_table[0].cam_entry.lsb_mac_addr =
6078 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6079 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6080 if (set)
6081 config->config_table[0].target_table_entry.flags = 0;
6082 else
6083 CAM_INVALIDATE(config->config_table[0]);
6084 config->config_table[0].target_table_entry.client_id = 0;
6085 config->config_table[0].target_table_entry.vlan_id = 0;
6086
6087 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6088 (set ? "setting" : "clearing"),
6089 config->config_table[0].cam_entry.msb_mac_addr,
6090 config->config_table[0].cam_entry.middle_mac_addr,
6091 config->config_table[0].cam_entry.lsb_mac_addr);
6092
6093 /* broadcast */
6094 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6095 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6096 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6097 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6098 if (set)
6099 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6100 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6101 else
6102 CAM_INVALIDATE(config->config_table[1]);
6103 config->config_table[1].target_table_entry.client_id = 0;
6104 config->config_table[1].target_table_entry.vlan_id = 0;
6105
6106 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6107 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6108 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6109}
6110
3101c2bc 6111static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6112{
6113 struct mac_configuration_cmd_e1h *config =
6114 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6115
3101c2bc 6116 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6117 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6118 return;
6119 }
6120
6121 /* CAM allocation for E1H
6122 * unicasts: by func number
6123 * multicast: 20+FUNC*20, 20 each
6124 */
6125 config->hdr.length_6b = 1;
6126 config->hdr.offset = BP_FUNC(bp);
6127 config->hdr.client_id = BP_CL_ID(bp);
6128 config->hdr.reserved1 = 0;
6129
6130 /* primary MAC */
6131 config->config_table[0].msb_mac_addr =
6132 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6133 config->config_table[0].middle_mac_addr =
6134 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6135 config->config_table[0].lsb_mac_addr =
6136 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6137 config->config_table[0].client_id = BP_L_ID(bp);
6138 config->config_table[0].vlan_id = 0;
6139 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6140 if (set)
6141 config->config_table[0].flags = BP_PORT(bp);
6142 else
6143 config->config_table[0].flags =
6144 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6145
6146 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6147 (set ? "setting" : "clearing"),
6148 config->config_table[0].msb_mac_addr,
6149 config->config_table[0].middle_mac_addr,
6150 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6151
6152 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6153 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6154 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6155}
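
Both set_mac_addr routines pack the six MAC bytes into three 16-bit CAM words via swab16(*(u16 *)&dev_addr[n]), which on a little-endian host yields each byte pair in wire order. A standalone check with a hypothetical MAC; the sketch assembles each halfword explicitly so it is endian-independent:

#include <stdint.h>
#include <stdio.h>

static uint16_t swab16(uint16_t v)
{
        return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
        uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };  /* hypothetical */
        int i;

        /* On little-endian, *(u16 *)&mac[0] reads 0x1000 and swab16()
         * gives 0x0010 -- the msb/middle/lsb words the CAM expects. */
        for (i = 0; i < 6; i += 2)
                printf("word %d = 0x%04x\n", i / 2,
                       swab16((uint16_t)(mac[i] | (mac[i + 1] << 8))));
        return 0;       /* prints 0x0010, 0x18aa, 0xbbcc */
}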
6156
6157static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6158 int *state_p, int poll)
6159{
6160 /* can take a while if any port is running */
34f80b04 6161 int cnt = 500;
a2fbb9ea 6162
6163 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6164 poll ? "polling" : "waiting", state, idx);
6165
6166 might_sleep();
34f80b04 6167 while (cnt--) {
6168 if (poll) {
6169 bnx2x_rx_int(bp->fp, 10);
6170 /* if index is different from 0
6171 * the reply for some commands will
3101c2bc 6172 * be on the non default queue
6173 */
6174 if (idx)
6175 bnx2x_rx_int(&bp->fp[idx], 10);
6176 }
a2fbb9ea 6177
3101c2bc 6178 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6179 if (*state_p == state)
6180 return 0;
6181
a2fbb9ea 6182 msleep(1);
6183 }
6184
a2fbb9ea 6185 /* timeout! */
6186 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6187 poll ? "polling" : "waiting", state, idx);
6188#ifdef BNX2X_STOP_ON_ERROR
6189 bnx2x_panic();
6190#endif
a2fbb9ea 6191
49d66772 6192 return -EBUSY;
6193}
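
bnx2x_wait_ramrod() polls a state word that the slowpath event handler updates from interrupt context, with mb() forcing a fresh read on every pass. A userspace sketch of the same wait using a C11 atomic in place of the barrier-plus-plain-read; all names are hypothetical:

#include <stdatomic.h>
#include <stdbool.h>

/* Poll 'state' (written from another context, e.g. a completion
 * handler) until it reaches 'wanted' or the budget runs out. */
static bool wait_for_state(atomic_int *state, int wanted,
                           void (*sleep_ms)(int))
{
        int cnt = 500;                  /* same budget as bnx2x_wait_ramrod() */

        while (cnt--) {
                if (atomic_load(state) == wanted)
                        return true;    /* completion observed */
                sleep_ms(1);
        }
        return false;                   /* timeout: caller may panic/dump */
}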
6194
6195static int bnx2x_setup_leading(struct bnx2x *bp)
6196{
34f80b04 6197 int rc;
a2fbb9ea 6198
c14423fe 6199 /* reset IGU state */
34f80b04 6200 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6201
6202 /* SETUP ramrod */
6203 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6204
6205 /* Wait for completion */
6206 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6207
34f80b04 6208 return rc;
6209}
6210
6211static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6212{
a2fbb9ea 6213 /* reset IGU state */
34f80b04 6214 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6215
228241eb 6216 /* SETUP ramrod */
6217 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6218 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6219
6220 /* Wait for completion */
6221 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 6222 &(bp->fp[index].state), 0);
6223}
6224
6225static int bnx2x_poll(struct napi_struct *napi, int budget);
6226static void bnx2x_set_rx_mode(struct net_device *dev);
6227
6228/* must be called with rtnl_lock */
6229static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 6230{
228241eb 6231 u32 load_code;
6232 int i, rc;
6233
6234#ifdef BNX2X_STOP_ON_ERROR
6235 if (unlikely(bp->panic))
6236 return -EPERM;
6237#endif
6238
6239 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6240
6241 /* Send LOAD_REQUEST command to MCP.
6242 Returns the type of LOAD command:
6243 if it is the first port to be initialized,
6244 common blocks should be initialized, otherwise - not
a2fbb9ea 6245 */
34f80b04 6246 if (!BP_NOMCP(bp)) {
6247 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6248 if (!load_code) {
da5a662a 6249 BNX2X_ERR("MCP response failure, aborting\n");
6250 return -EBUSY;
6251 }
34f80b04 6252 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
a2fbb9ea 6253 return -EBUSY; /* other port in diagnostic mode */
34f80b04 6254
a2fbb9ea 6255 } else {
6256 int port = BP_PORT(bp);
6257
6258 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6259 load_count[0], load_count[1], load_count[2]);
6260 load_count[0]++;
da5a662a 6261 load_count[1 + port]++;
6262 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6263 load_count[0], load_count[1], load_count[2]);
6264 if (load_count[0] == 1)
6265 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
da5a662a 6266 else if (load_count[1 + port] == 1)
6267 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6268 else
6269 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6270 }
6271
6272 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6273 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6274 bp->port.pmf = 1;
6275 else
6276 bp->port.pmf = 0;
6277 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6278
6279 /* if we can't use MSI-X we only need one fp,
6280 * so try to enable MSI-X with the requested number of fp's
6281 * and fall back to INT#A with one fp
6282 */
6283 if (use_inta) {
6284 bp->num_queues = 1;
6285
6286 } else {
6287 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6288 /* user requested number */
6289 bp->num_queues = use_multi;
6290
6291 else if (use_multi)
6292 bp->num_queues = min_t(u32, num_online_cpus(),
6293 BP_MAX_QUEUES(bp));
6294 else
a2fbb9ea 6295 bp->num_queues = 1;
6296
6297 if (bnx2x_enable_msix(bp)) {
6298 /* failed to enable MSI-X */
6299 bp->num_queues = 1;
6300 if (use_multi)
6301 BNX2X_ERR("Multi requested but failed"
6302 " to enable MSI-X\n");
6303 }
6304 }
6305 DP(NETIF_MSG_IFUP,
6306 "set number of queues to %d\n", bp->num_queues);
c14423fe 6307
6308 if (bnx2x_alloc_mem(bp))
6309 return -ENOMEM;
6310
6311 for_each_queue(bp, i)
6312 bnx2x_fp(bp, i, disable_tpa) =
6313 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6314
6315 if (bp->flags & USING_MSIX_FLAG) {
6316 rc = bnx2x_req_msix_irqs(bp);
6317 if (rc) {
6318 pci_disable_msix(bp->pdev);
6319 goto load_error;
6320 }
6321 } else {
6322 bnx2x_ack_int(bp);
6323 rc = bnx2x_req_irq(bp);
6324 if (rc) {
6325 BNX2X_ERR("IRQ request failed, aborting\n");
6326 goto load_error;
6327 }
6328 }
6329
6330 for_each_queue(bp, i)
6331 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6332 bnx2x_poll, 128);
6333
a2fbb9ea 6334 /* Initialize HW */
6335 rc = bnx2x_init_hw(bp, load_code);
6336 if (rc) {
a2fbb9ea 6337 BNX2X_ERR("HW init failed, aborting\n");
228241eb 6338 goto load_error;
6339 }
6340
a2fbb9ea 6341 /* Setup NIC internals and enable interrupts */
471de716 6342 bnx2x_nic_init(bp, load_code);
6343
6344 /* Send LOAD_DONE command to MCP */
34f80b04 6345 if (!BP_NOMCP(bp)) {
6346 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6347 if (!load_code) {
da5a662a 6348 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6349 rc = -EBUSY;
228241eb 6350 goto load_int_disable;
6351 }
6352 }
6353
6354 bnx2x_stats_init(bp);
6355
6356 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6357
6358 /* Enable Rx interrupt handling before sending the ramrod
6359 as it's completed on Rx FP queue */
6360 for_each_queue(bp, i)
6361 napi_enable(&bnx2x_fp(bp, i, napi));
6362
6363 /* Enable interrupt handling */
6364 atomic_set(&bp->intr_sem, 0);
6365
6366 rc = bnx2x_setup_leading(bp);
6367 if (rc) {
da5a662a 6368 BNX2X_ERR("Setup leading failed!\n");
228241eb 6369 goto load_stop_netif;
34f80b04 6370 }
a2fbb9ea 6371
6372 if (CHIP_IS_E1H(bp))
6373 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6374 BNX2X_ERR("!!! mf_cfg function disabled\n");
6375 bp->state = BNX2X_STATE_DISABLED;
6376 }
a2fbb9ea 6377
6378 if (bp->state == BNX2X_STATE_OPEN)
6379 for_each_nondefault_queue(bp, i) {
6380 rc = bnx2x_setup_multi(bp, i);
6381 if (rc)
6382 goto load_stop_netif;
6383 }
a2fbb9ea 6384
34f80b04 6385 if (CHIP_IS_E1(bp))
3101c2bc 6386 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6387 else
3101c2bc 6388 bnx2x_set_mac_addr_e1h(bp, 1);
6389
6390 if (bp->port.pmf)
6391 bnx2x_initial_phy_init(bp);
6392
6393 /* Start fast path */
6394 switch (load_mode) {
6395 case LOAD_NORMAL:
6396 /* Tx queue should only be re-enabled */
6397 netif_wake_queue(bp->dev);
6398 bnx2x_set_rx_mode(bp->dev);
6399 break;
6400
6401 case LOAD_OPEN:
a2fbb9ea 6402 netif_start_queue(bp->dev);
34f80b04 6403 bnx2x_set_rx_mode(bp->dev);
6404 if (bp->flags & USING_MSIX_FLAG)
6405 printk(KERN_INFO PFX "%s: using MSI-X\n",
6406 bp->dev->name);
34f80b04 6407 break;
a2fbb9ea 6408
34f80b04 6409 case LOAD_DIAG:
a2fbb9ea 6410 bnx2x_set_rx_mode(bp->dev);
6411 bp->state = BNX2X_STATE_DIAG;
6412 break;
6413
6414 default:
6415 break;
6416 }
6417
6418 if (!bp->port.pmf)
6419 bnx2x__link_status_update(bp);
6420
6421 /* start the timer */
6422 mod_timer(&bp->timer, jiffies + bp->current_interval);
6423
34f80b04 6424
6425 return 0;
6426
228241eb 6427load_stop_netif:
6428 for_each_queue(bp, i)
6429 napi_disable(&bnx2x_fp(bp, i, napi));
6430
228241eb 6431load_int_disable:
615f8fd9 6432 bnx2x_int_disable_sync(bp);
a2fbb9ea 6433
34f80b04 6434 /* Release IRQs */
6435 bnx2x_free_irq(bp);
6436
6437 /* Free SKBs, SGEs, TPA pool and driver internals */
6438 bnx2x_free_skbs(bp);
6439 for_each_queue(bp, i)
6440 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6441 RX_SGE_CNT*NUM_RX_SGE_PAGES);
228241eb 6442load_error:
6443 bnx2x_free_mem(bp);
6444
6445 /* TBD we really need to reset the chip
6446 if we want to recover from this */
34f80b04 6447 return rc;
6448}
6449
6450static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6451{
6452 int rc;
6453
c14423fe 6454 /* halt the connection */
6455 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6456 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6457
34f80b04 6458 /* Wait for completion */
a2fbb9ea 6459 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 6460 &(bp->fp[index].state), 1);
c14423fe 6461 if (rc) /* timeout */
6462 return rc;
6463
6464 /* delete cfc entry */
6465 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6466
6467 /* Wait for completion */
6468 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6469 &(bp->fp[index].state), 1);
6470 return rc;
6471}
6472
da5a662a 6473static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6474{
49d66772 6475 u16 dsb_sp_prod_idx;
c14423fe 6476 /* if the other port is handling traffic,
a2fbb9ea 6477 this can take a lot of time */
6478 int cnt = 500;
6479 int rc;
6480
6481 might_sleep();
6482
6483 /* Send HALT ramrod */
6484 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6485 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6486
6487 /* Wait for completion */
6488 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6489 &(bp->fp[0].state), 1);
6490 if (rc) /* timeout */
da5a662a 6491 return rc;
a2fbb9ea 6492
49d66772 6493 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6494
228241eb 6495 /* Send PORT_DELETE ramrod */
6496 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6497
49d66772 6498 /* Wait for completion to arrive on default status block
6499 we are going to reset the chip anyway
6500 so there is not much to do if this times out
6501 */
34f80b04 6502 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6503 if (!cnt) {
6504 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6505 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6506 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6507#ifdef BNX2X_STOP_ON_ERROR
6508 bnx2x_panic();
6509#else
6510 rc = -EBUSY;
6511#endif
6512 break;
6513 }
6514 cnt--;
da5a662a 6515 msleep(1);
6516 }
6517 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6518 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6519
6520 return rc;
6521}
6522
6523static void bnx2x_reset_func(struct bnx2x *bp)
6524{
6525 int port = BP_PORT(bp);
6526 int func = BP_FUNC(bp);
6527 int base, i;
6528
6529 /* Configure IGU */
6530 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6531 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6532
6533 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6534
6535 /* Clear ILT */
6536 base = FUNC_ILT_BASE(func);
6537 for (i = base; i < base + ILT_PER_FUNC; i++)
6538 bnx2x_ilt_wr(bp, i, 0);
6539}
6540
6541static void bnx2x_reset_port(struct bnx2x *bp)
6542{
6543 int port = BP_PORT(bp);
6544 u32 val;
6545
6546 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6547
6548 /* Do not rcv packets to BRB */
6549 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6550 /* Do not direct rcv packets that are not for MCP to the BRB */
6551 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6552 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6553
6554 /* Configure AEU */
6555 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6556
6557 msleep(100);
6558 /* Check for BRB port occupancy */
6559 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6560 if (val)
6561 DP(NETIF_MSG_IFDOWN,
6562 "BRB1 is not empty %d blocks are occupied\n", val);
6563
6564 /* TODO: Close Doorbell port? */
6565}
6566
6567static void bnx2x_reset_common(struct bnx2x *bp)
6568{
6569 /* reset_common */
6570 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6571 0xd3ffff7f);
6572 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6573}
6574
6575static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6576{
6577 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6578 BP_FUNC(bp), reset_code);
6579
6580 switch (reset_code) {
6581 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6582 bnx2x_reset_port(bp);
6583 bnx2x_reset_func(bp);
6584 bnx2x_reset_common(bp);
6585 break;
6586
6587 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6588 bnx2x_reset_port(bp);
6589 bnx2x_reset_func(bp);
6590 break;
6591
6592 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6593 bnx2x_reset_func(bp);
6594 break;
49d66772 6595
6596 default:
6597 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6598 break;
6599 }
6600}
6601
6602/* must be called with rtnl_lock */
6603static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6604{
da5a662a 6605 int port = BP_PORT(bp);
a2fbb9ea 6606 u32 reset_code = 0;
da5a662a 6607 int i, cnt, rc;
6608
6609 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6610
6611 bp->rx_mode = BNX2X_RX_MODE_NONE;
6612 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6613
6614 if (netif_running(bp->dev)) {
6615 netif_tx_disable(bp->dev);
6616 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6617 }
6618
6619 del_timer_sync(&bp->timer);
6620 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6621 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6622 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6623
da5a662a 6624 /* Wait until tx fast path tasks complete */
6625 for_each_queue(bp, i) {
6626 struct bnx2x_fastpath *fp = &bp->fp[i];
6627
6628 cnt = 1000;
6629 smp_rmb();
6630 while (BNX2X_HAS_TX_WORK(fp)) {
6631
6632 if (!netif_running(bp->dev))
6633 bnx2x_tx_int(fp, 1000);
6634
6635 if (!cnt) {
6636 BNX2X_ERR("timeout waiting for queue[%d]\n",
6637 i);
6638#ifdef BNX2X_STOP_ON_ERROR
6639 bnx2x_panic();
6640 return -EBUSY;
6641#else
6642 break;
6643#endif
6644 }
6645 cnt--;
da5a662a 6646 msleep(1);
6647 smp_rmb();
6648 }
228241eb 6649 }
a2fbb9ea 6650
6651 /* Give HW time to discard old tx messages */
6652 msleep(1);
a2fbb9ea 6653
6654 for_each_queue(bp, i)
6655 napi_disable(&bnx2x_fp(bp, i, napi));
6656 /* Disable interrupts after Tx and Rx are disabled on stack level */
6657 bnx2x_int_disable_sync(bp);
a2fbb9ea 6658
6659 /* Release IRQs */
6660 bnx2x_free_irq(bp);
6661
6662 if (unload_mode == UNLOAD_NORMAL)
6663 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6664
6665 else if (bp->flags & NO_WOL_FLAG) {
a2fbb9ea 6666 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6667 if (CHIP_IS_E1H(bp))
6668 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
228241eb 6669
6670 } else if (bp->wol) {
6671 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
a2fbb9ea 6672 u8 *mac_addr = bp->dev->dev_addr;
34f80b04 6673 u32 val;
6674 /* The mac address is written to entries 1-4 to
6675 preserve entry 0 which is used by the PMF */
6676 u8 entry = (BP_E1HVN(bp) + 1)*8;
6677
a2fbb9ea 6678 val = (mac_addr[0] << 8) | mac_addr[1];
da5a662a 6679 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry, val);
6680
6681 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6682 (mac_addr[4] << 8) | mac_addr[5];
da5a662a 6683 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6684
6685 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
228241eb 6686
6687 } else
6688 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6689
6690 if (CHIP_IS_E1(bp)) {
6691 struct mac_configuration_cmd *config =
6692 bnx2x_sp(bp, mcast_config);
6693
6694 bnx2x_set_mac_addr_e1(bp, 0);
6695
6696 for (i = 0; i < config->hdr.length_6b; i++)
6697 CAM_INVALIDATE(config->config_table[i]);
6698
6699 config->hdr.length_6b = i;
6700 if (CHIP_REV_IS_SLOW(bp))
6701 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6702 else
6703 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6704 config->hdr.client_id = BP_CL_ID(bp);
6705 config->hdr.reserved1 = 0;
6706
6707 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6708 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6709 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6710
6711 } else { /* E1H */
6712 bnx2x_set_mac_addr_e1h(bp, 0);
6713
6714 for (i = 0; i < MC_HASH_SIZE; i++)
6715 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6716 }
6717
6718 if (CHIP_IS_E1H(bp))
6719 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6720
6721 /* Close multi and leading connections.
6722 Completions for ramrods are collected in a synchronous way */
6723 for_each_nondefault_queue(bp, i)
6724 if (bnx2x_stop_multi(bp, i))
228241eb 6725 goto unload_error;
a2fbb9ea 6726
6727 rc = bnx2x_stop_leading(bp);
6728 if (rc) {
34f80b04 6729 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6730#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6731 return -EBUSY;
6732#else
6733 goto unload_error;
34f80b04 6734#endif
6735 }
6736
6737unload_error:
34f80b04 6738 if (!BP_NOMCP(bp))
228241eb 6739 reset_code = bnx2x_fw_command(bp, reset_code);
6740 else {
6741 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6742 load_count[0], load_count[1], load_count[2]);
6743 load_count[0]--;
da5a662a 6744 load_count[1 + port]--;
6745 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6746 load_count[0], load_count[1], load_count[2]);
6747 if (load_count[0] == 0)
6748 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6749 else if (load_count[1 + port] == 0)
6750 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6751 else
6752 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6753 }
a2fbb9ea 6754
6755 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6756 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6757 bnx2x__link_reset(bp);
6758
6759 /* Reset the chip */
228241eb 6760 bnx2x_reset_chip(bp, reset_code);
6761
6762 /* Report UNLOAD_DONE to MCP */
34f80b04 6763 if (!BP_NOMCP(bp))
6764 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6765
7a9b2557 6766 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6767 bnx2x_free_skbs(bp);
6768 for_each_queue(bp, i)
6769 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6770 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6771 bnx2x_free_mem(bp);
6772
6773 bp->state = BNX2X_STATE_CLOSED;
228241eb 6774
6775 netif_carrier_off(bp->dev);
6776
6777 return 0;
6778}
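
When no management firmware is present (the BP_NOMCP paths in bnx2x_nic_load()/bnx2x_nic_unload()), the driver arbitrates reset scope itself through load_count[]: index 0 counts every loaded function and index 1+port counts per port, so the first load performs COMMON init and the last unload performs the COMMON reset. A self-contained sketch of that reference counting:

#include <stdio.h>

enum scope { SCOPE_COMMON, SCOPE_PORT, SCOPE_FUNCTION };

static int load_count[3];       /* [0] all functions, [1+port] per port */

static enum scope nic_load(int port)
{
        load_count[0]++;
        load_count[1 + port]++;
        if (load_count[0] == 1)
                return SCOPE_COMMON;    /* first function overall */
        if (load_count[1 + port] == 1)
                return SCOPE_PORT;      /* first function on this port */
        return SCOPE_FUNCTION;
}

static enum scope nic_unload(int port)
{
        load_count[0]--;
        load_count[1 + port]--;
        if (load_count[0] == 0)
                return SCOPE_COMMON;    /* last function overall */
        if (load_count[1 + port] == 0)
                return SCOPE_PORT;      /* last function on this port */
        return SCOPE_FUNCTION;
}

int main(void)
{
        printf("%d\n", nic_load(0));    /* SCOPE_COMMON */
        printf("%d\n", nic_load(1));    /* SCOPE_PORT */
        printf("%d\n", nic_unload(1));  /* SCOPE_PORT */
        printf("%d\n", nic_unload(0));  /* SCOPE_COMMON */
        return 0;
}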
6779
6780static void bnx2x_reset_task(struct work_struct *work)
6781{
6782 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6783
6784#ifdef BNX2X_STOP_ON_ERROR
6785 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6786 " so reset not done to allow debug dump,\n"
6787 KERN_ERR " you will need to reboot when done\n");
6788 return;
6789#endif
6790
6791 rtnl_lock();
6792
6793 if (!netif_running(bp->dev))
6794 goto reset_task_exit;
6795
6796 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6797 bnx2x_nic_load(bp, LOAD_NORMAL);
6798
6799reset_task_exit:
6800 rtnl_unlock();
6801}
6802
/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

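/* A pre-boot UNDI (PXE) driver leaves the doorbell CID offset at 0x7.
 * When that is detected, the device must be quiesced before this
 * driver can take over: request an MCP unload for each port, block RX
 * traffic into the BRB, reset everything except the NIG, and restore
 * the NIG port-swap strap values. */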
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
				    HC_REG_CONFIG_0), 0x1000);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
				NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
				NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			       (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
				MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
		}
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

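	/* Illustrative example: chip_num 0x164e (57710) with rev 0,
	 * metal 0 and bond_id 0 composes to chip_id 0x164e0000 */
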
	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);

	BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
		       bp->common.hw_config, bp->common.board);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

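/* Build the ethtool "supported" mask for the port from the switch
 * configuration (1G SerDes vs 10G XGXS) and the external PHY type,
 * then trim it by the NVRAM speed capability mask. */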
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

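/* Translate the NVRAM link_config word into the requested line speed,
 * duplex, flow control and advertised mode bitmap; an invalid
 * combination falls back to autoneg with everything advertised. */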
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.serdes_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
	     KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
		       " link_config 0x%08x\n",
		       bp->link_params.serdes_config,
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("Single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;
	bp->rx_offset = 0;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : bp->timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

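/* Note that in E1H multi-function mode the link belongs to the
 * physical port, so per-function speed/duplex and pause requests are
 * silently ignored (the handlers return 0 without touching the PHY). */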
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	char phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
		 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
		 BCM_5710_FW_REVISION_VERSION,
		 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

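/* NVRAM access is arbitrated per port via MCP_REG_MCPR_NVM_SW_ARB:
 * set the REQ bit for this port and poll until the matching ARB bit
 * is granted; the poll budget is scaled 100x on emulation/FPGA. */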
static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			val = cpu_to_be32(val);
			*ret_val = val;
			rc = 0;
			break;
		}
	}

	return rc;
}

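/* Reads span an arbitrary dword-aligned range: the first dword access
 * carries MCPR_NVM_COMMAND_FIRST and the final one
 * MCPR_NVM_COMMAND_LAST, so the NVRAM state machine treats the whole
 * range as a single transaction. */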
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

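/* e.g. BYTE_OFFSET(0x103) = 24: byte 3 of the containing dword is
 * patched below by shifting the new byte value left by 24 bits */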
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	u32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

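/* Multi-dword writes are batched per NVRAM page: LAST is raised on
 * the final dword of the buffer or of a page, and FIRST on the first
 * dword of the next page, so each page boundary closes one program
 * cycle. */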
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
			    FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
			    FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" },
	{ "MC errors (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

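/* The register self-test writes 0x00000000 and then 0xffffffff to
 * each table entry (at offset0 + port*offset1), reads the value back
 * through the given mask and restores the original contents. */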
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

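/* Read every word of the testable memories to trigger parity checks,
 * then compare each parity status register against the mask of bits
 * that may legitimately be set on this chip (E1 vs E1H). */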
8561static int bnx2x_test_memory(struct bnx2x *bp)
8562{
8563 int i, j, rc = -ENODEV;
8564 u32 val;
8565 static const struct {
8566 u32 offset;
8567 int size;
8568 } mem_tbl[] = {
8569 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8570 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8571 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8572 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8573 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8574 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8575 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8576
8577 { 0xffffffff, 0 }
8578 };
8579 static const struct {
8580 char *name;
8581 u32 offset;
9dabc424
YG
8582 u32 e1_mask;
8583 u32 e1h_mask;
f3c87cdd 8584 } prty_tbl[] = {
9dabc424
YG
8585 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8586 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8587 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8588 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8589 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8590 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8591
8592 { NULL, 0xffffffff, 0, 0 }
8593 };
8594
8595 if (!netif_running(bp->dev))
8596 return rc;
8597
8598 /* Go through all the memories */
8599 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8600 for (j = 0; j < mem_tbl[i].size; j++)
8601 REG_RD(bp, mem_tbl[i].offset + j*4);
8602
8603 /* Check the parity status */
8604 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8605 val = REG_RD(bp, prty_tbl[i].offset);
8606 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8607 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8608 DP(NETIF_MSG_HW,
8609 "%s is 0x%x\n", prty_tbl[i].name, val);
8610 goto test_mem_exit;
8611 }
8612 }
8613
8614 rc = 0;
8615
8616test_mem_exit:
8617 return rc;
8618}
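The memory test only reads each word to provoke the parity checkers, then inspects the per-block parity status registers; bits listed in e1_mask/e1h_mask are expected noise for that chip revision and are masked out. The filter is a plain mask-out, sketched below with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sts = 0x00020040;     /* pretend CCM_REG_CCM_PRTY_STS read */
	uint32_t benign = 0x3ffc0;     /* e1_mask: bits tolerated on E1 */

	if (sts & ~benign)             /* 0x00020040 is inside the mask -> pass */
		printf("unexpected parity bits 0x%x\n", sts & ~benign);
	else
		printf("parity status clean\n");
	return 0;
}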
8619
8620static void bnx2x_netif_start(struct bnx2x *bp)
8621{
8622 int i;
8623
8624 if (atomic_dec_and_test(&bp->intr_sem)) {
8625 if (netif_running(bp->dev)) {
8626 bnx2x_int_enable(bp);
8627 for_each_queue(bp, i)
8628 napi_enable(&bnx2x_fp(bp, i, napi));
8629 if (bp->state == BNX2X_STATE_OPEN)
8630 netif_wake_queue(bp->dev);
8631 }
8632 }
8633}
8634
8635static void bnx2x_netif_stop(struct bnx2x *bp)
8636{
8637 int i;
8638
8639 if (netif_running(bp->dev)) {
8640 netif_tx_disable(bp->dev);
8641 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8642 for_each_queue(bp, i)
8643 napi_disable(&bnx2x_fp(bp, i, napi));
8644 }
8645 bnx2x_int_disable_sync(bp);
8646}
8647
8648static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8649{
8650 int cnt = 1000;
8651
8652 if (link_up)
8653 while (bnx2x_link_test(bp) && cnt--)
8654 msleep(10);
8655}
8656
8657static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8658{
8659 unsigned int pkt_size, num_pkts, i;
8660 struct sk_buff *skb;
8661 unsigned char *packet;
8662 struct bnx2x_fastpath *fp = &bp->fp[0];
8663 u16 tx_start_idx, tx_idx;
8664 u16 rx_start_idx, rx_idx;
8665 u16 pkt_prod;
8666 struct sw_tx_bd *tx_buf;
8667 struct eth_tx_bd *tx_bd;
8668 dma_addr_t mapping;
8669 union eth_rx_cqe *cqe;
8670 u8 cqe_fp_flags;
8671 struct sw_rx_bd *rx_buf;
8672 u16 len;
8673 int rc = -ENODEV;
8674
8675 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8676 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4a37fb66 8677 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8678 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8679 bnx2x_release_phy_lock(bp);
8680
8681 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8682 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
4a37fb66 8683 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8684 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8685 bnx2x_release_phy_lock(bp);
8686 /* wait until link state is restored */
8687 bnx2x_wait_for_link(bp, link_up);
8688
8689 } else
8690 return -EINVAL;
8691
8692 pkt_size = 1514;
8693 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8694 if (!skb) {
8695 rc = -ENOMEM;
8696 goto test_loopback_exit;
8697 }
8698 packet = skb_put(skb, pkt_size);
8699 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8700 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8701 for (i = ETH_HLEN; i < pkt_size; i++)
8702 packet[i] = (unsigned char) (i & 0xff);
8703
8704 num_pkts = 0;
8705 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8706 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8707
8708 pkt_prod = fp->tx_pkt_prod++;
8709 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8710 tx_buf->first_bd = fp->tx_bd_prod;
8711 tx_buf->skb = skb;
8712
8713 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8714 mapping = pci_map_single(bp->pdev, skb->data,
8715 skb_headlen(skb), PCI_DMA_TODEVICE);
8716 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8717 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8718 tx_bd->nbd = cpu_to_le16(1);
8719 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8720 tx_bd->vlan = cpu_to_le16(pkt_prod);
8721 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8722 ETH_TX_BD_FLAGS_END_BD);
8723 tx_bd->general_data = ((UNICAST_ADDRESS <<
8724 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8725
8726 fp->hw_tx_prods->bds_prod =
8727 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8728 mb(); /* FW restriction: must not reorder writing nbd and packets */
8729 fp->hw_tx_prods->packets_prod =
8730 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8731 DOORBELL(bp, FP_IDX(fp), 0);
8732
8733 mmiowb();
8734
8735 num_pkts++;
8736 fp->tx_bd_prod++;
8737 bp->dev->trans_start = jiffies;
8738
8739 udelay(100);
8740
8741 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8742 if (tx_idx != tx_start_idx + num_pkts)
8743 goto test_loopback_exit;
8744
8745 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8746 if (rx_idx != rx_start_idx + num_pkts)
8747 goto test_loopback_exit;
8748
8749 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8750 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8751 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8752 goto test_loopback_rx_exit;
8753
8754 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8755 if (len != pkt_size)
8756 goto test_loopback_rx_exit;
8757
8758 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8759 skb = rx_buf->skb;
8760 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8761 for (i = ETH_HLEN; i < pkt_size; i++)
8762 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8763 goto test_loopback_rx_exit;
8764
8765 rc = 0;
8766
8767test_loopback_rx_exit:
8768 bp->dev->last_rx = jiffies;
8769
8770 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8771 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8772 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8773 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8774
8775 /* Update producers */
8776 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8777 fp->rx_sge_prod);
8778 mmiowb(); /* keep prod updates ordered */
8779
8780test_loopback_exit:
8781 bp->link_params.loopback_mode = LOOPBACK_NONE;
8782
8783 return rc;
8784}
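The loopback test sends a single 1514-byte frame addressed to the device's own MAC with a deterministic byte ramp in the payload, then checks that exactly one packet completed on TX, one arrived on RX, the CQE carries no error flags, and the payload survived intact. Pattern generation and verification reduce to the following sketch (the MAC is a made-up example):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ETH_ALEN 6
#define ETH_HLEN 14

int main(void)
{
	uint8_t dev_addr[ETH_ALEN] = { 0x00, 0x10, 0x18, 0x00, 0x00, 0x01 };
	uint8_t pkt[1514];
	unsigned int i;

	memcpy(pkt, dev_addr, ETH_ALEN);           /* DA = own MAC */
	memset(pkt + ETH_ALEN, 0, ETH_HLEN - ETH_ALEN);
	for (i = ETH_HLEN; i < sizeof(pkt); i++)
		pkt[i] = (uint8_t)(i & 0xff);      /* byte ramp */

	/* ... the frame would be looped through the BMAC/XGXS here ... */

	for (i = ETH_HLEN; i < sizeof(pkt); i++)
		if (pkt[i] != (uint8_t)(i & 0xff)) {
			printf("corruption at byte %u\n", i);
			return 1;
		}
	return 0;
}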
8785
8786static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8787{
8788 int rc = 0;
8789
8790 if (!netif_running(bp->dev))
8791 return BNX2X_LOOPBACK_FAILED;
8792
8793 bnx2x_netif_stop(bp);
8794
8795 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8796 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8797 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8798 }
8799
8800 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8801 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8802 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8803 }
8804
8805 bnx2x_netif_start(bp);
8806
8807 return rc;
8808}
8809
8810#define CRC32_RESIDUAL 0xdebb20e3
8811
8812static int bnx2x_test_nvram(struct bnx2x *bp)
8813{
8814 static const struct {
8815 int offset;
8816 int size;
8817 } nvram_tbl[] = {
8818 { 0, 0x14 }, /* bootstrap */
8819 { 0x14, 0xec }, /* dir */
8820 { 0x100, 0x350 }, /* manuf_info */
8821 { 0x450, 0xf0 }, /* feature_info */
8822 { 0x640, 0x64 }, /* upgrade_key_info */
8823 { 0x6a4, 0x64 },
8824 { 0x708, 0x70 }, /* manuf_key_info */
8825 { 0x778, 0x70 },
8826 { 0, 0 }
8827 };
8828 u32 buf[0x350 / 4];
8829 u8 *data = (u8 *)buf;
8830 int i, rc;
8831 u32 magic, csum;
8832
8833 rc = bnx2x_nvram_read(bp, 0, data, 4);
8834 if (rc) {
8835 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8836 goto test_nvram_exit;
8837 }
8838
8839 magic = be32_to_cpu(buf[0]);
8840 if (magic != 0x669955aa) {
8841 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8842 rc = -ENODEV;
8843 goto test_nvram_exit;
8844 }
8845
8846 for (i = 0; nvram_tbl[i].size; i++) {
8847
8848 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8849 nvram_tbl[i].size);
8850 if (rc) {
8851 DP(NETIF_MSG_PROBE,
8852 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8853 goto test_nvram_exit;
8854 }
8855
8856 csum = ether_crc_le(nvram_tbl[i].size, data);
8857 if (csum != CRC32_RESIDUAL) {
8858 DP(NETIF_MSG_PROBE,
8859 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8860 rc = -ENODEV;
8861 goto test_nvram_exit;
8862 }
8863 }
8864
8865test_nvram_exit:
8866 return rc;
8867}
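Each NVRAM partition is stored with its CRC32 appended, so the test never compares against a stored value directly: running the same little-endian CRC over data-plus-CRC lands on the fixed residual 0xdebb20e3 when the block is intact. A self-contained sketch of that property (bitwise CRC matching ether_crc_le(), and assuming the stored CRC is the bit-inverted register appended least-significant byte first):

#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_le(uint32_t crc, const uint8_t *p, unsigned int len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	uint8_t blk[20 + 4];
	uint32_t crc, res;
	unsigned int i;

	for (i = 0; i < 20; i++)
		blk[i] = (uint8_t)(i * 7 + 1);          /* arbitrary payload */
	crc = ~crc32_le(~0u, blk, 20);                  /* value as stored */
	for (i = 0; i < 4; i++)
		blk[20 + i] = (uint8_t)(crc >> (8 * i));

	res = crc32_le(~0u, blk, sizeof(blk));          /* data + CRC */
	printf("residual 0x%08x (expect 0xdebb20e3)\n", res);
	return res != 0xdebb20e3;
}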
8868
8869static int bnx2x_test_intr(struct bnx2x *bp)
8870{
8871 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8872 int i, rc;
8873
8874 if (!netif_running(bp->dev))
8875 return -ENODEV;
8876
8877 config->hdr.length_6b = 0;
8878 config->hdr.offset = 0;
8879 config->hdr.client_id = BP_CL_ID(bp);
8880 config->hdr.reserved1 = 0;
8881
8882 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8883 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8884 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8885 if (rc == 0) {
8886 bp->set_mac_pending++;
8887 for (i = 0; i < 10; i++) {
8888 if (!bp->set_mac_pending)
8889 break;
8890 msleep_interruptible(10);
8891 }
8892 if (i == 10)
8893 rc = -ENODEV;
8894 }
8895
8896 return rc;
8897}
8898
8899static void bnx2x_self_test(struct net_device *dev,
8900 struct ethtool_test *etest, u64 *buf)
8901{
8902 struct bnx2x *bp = netdev_priv(dev);
8903
8904 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8905
f3c87cdd 8906 if (!netif_running(dev))
a2fbb9ea 8907 return;
a2fbb9ea 8908
8909 	/* offline tests are not supported in MF mode */
8910 if (IS_E1HMF(bp))
8911 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8912
8913 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8914 u8 link_up;
8915
8916 link_up = bp->link_vars.link_up;
8917 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8918 bnx2x_nic_load(bp, LOAD_DIAG);
8919 /* wait until link state is restored */
8920 bnx2x_wait_for_link(bp, link_up);
8921
8922 if (bnx2x_test_registers(bp) != 0) {
8923 buf[0] = 1;
8924 etest->flags |= ETH_TEST_FL_FAILED;
8925 }
8926 if (bnx2x_test_memory(bp) != 0) {
8927 buf[1] = 1;
8928 etest->flags |= ETH_TEST_FL_FAILED;
8929 }
8930 buf[2] = bnx2x_test_loopback(bp, link_up);
8931 if (buf[2] != 0)
8932 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 8933
8934 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8935 bnx2x_nic_load(bp, LOAD_NORMAL);
8936 /* wait until link state is restored */
8937 bnx2x_wait_for_link(bp, link_up);
8938 }
8939 if (bnx2x_test_nvram(bp) != 0) {
8940 buf[3] = 1;
8941 etest->flags |= ETH_TEST_FL_FAILED;
8942 }
8943 if (bnx2x_test_intr(bp) != 0) {
8944 buf[4] = 1;
8945 etest->flags |= ETH_TEST_FL_FAILED;
8946 }
8947 if (bp->port.pmf)
8948 if (bnx2x_link_test(bp) != 0) {
8949 buf[5] = 1;
8950 etest->flags |= ETH_TEST_FL_FAILED;
8951 }
8952 buf[7] = bnx2x_mc_assert(bp);
8953 if (buf[7] != 0)
8954 etest->flags |= ETH_TEST_FL_FAILED;
8955
8956#ifdef BNX2X_EXTRA_DEBUG
8957 bnx2x_panic_dump(bp);
8958#endif
8959}
8960
8961static const struct {
8962 long offset;
8963 int size;
8964 u32 flags;
8965#define STATS_FLAGS_PORT 1
8966#define STATS_FLAGS_FUNC 2
8967 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 8968} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8969/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8970 8, STATS_FLAGS_FUNC, "rx_bytes" },
8971 { STATS_OFFSET32(error_bytes_received_hi),
8972 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8973 { STATS_OFFSET32(total_bytes_transmitted_hi),
8974 8, STATS_FLAGS_FUNC, "tx_bytes" },
8975 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8976 8, STATS_FLAGS_PORT, "tx_error_bytes" },
bb2a0f7a 8977 { STATS_OFFSET32(total_unicast_packets_received_hi),
66e855f3 8978 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
bb2a0f7a 8979 { STATS_OFFSET32(total_multicast_packets_received_hi),
66e855f3 8980 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
bb2a0f7a 8981 { STATS_OFFSET32(total_broadcast_packets_received_hi),
66e855f3 8982 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
bb2a0f7a 8983 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
66e855f3 8984 8, STATS_FLAGS_FUNC, "tx_packets" },
bb2a0f7a 8985 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
66e855f3 8986 8, STATS_FLAGS_PORT, "tx_mac_errors" },
bb2a0f7a 8987/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
66e855f3 8988 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 8989 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 8990 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 8991 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 8992 8, STATS_FLAGS_PORT, "rx_align_errors" },
bb2a0f7a 8993 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 8994 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 8995 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 8996 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
bb2a0f7a 8997 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 8998 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 8999 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9000 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9001 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9002 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9003 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9004 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9005 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9006 8, STATS_FLAGS_PORT, "rx_fragments" },
9007/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9008 8, STATS_FLAGS_PORT, "rx_jabbers" },
bb2a0f7a 9009 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
66e855f3 9010 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
bb2a0f7a 9011 { STATS_OFFSET32(jabber_packets_received),
66e855f3 9012 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
bb2a0f7a 9013 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9014 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9015 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9016 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9017 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9018 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9019 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9020 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9021 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9022 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9023 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9024 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
bb2a0f7a 9025 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9026 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
bb2a0f7a 9027/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
66e855f3 9028 8, STATS_FLAGS_PORT, "rx_xon_frames" },
bb2a0f7a 9029 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9030 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9031 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9032 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9033 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9034 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
bb2a0f7a 9035 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9036 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9037 { STATS_OFFSET32(mac_filter_discard),
9038 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9039 { STATS_OFFSET32(no_buff_discard),
9040 4, STATS_FLAGS_FUNC, "rx_discards" },
9041 { STATS_OFFSET32(xxoverflow_discard),
9042 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9043 { STATS_OFFSET32(brb_drop_hi),
9044 8, STATS_FLAGS_PORT, "brb_discard" },
9045 { STATS_OFFSET32(brb_truncate_hi),
9046 8, STATS_FLAGS_PORT, "brb_truncate" },
9047/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9048 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9049 { STATS_OFFSET32(rx_skb_alloc_failed),
9050 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9051/* 42 */{ STATS_OFFSET32(hw_csum_err),
9052 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9053};
9054
9055#define IS_NOT_E1HMF_STAT(bp, i) \
9056 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9057
9058static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9059{
9060 struct bnx2x *bp = netdev_priv(dev);
9061 int i, j;
9062
9063 switch (stringset) {
9064 case ETH_SS_STATS:
bb2a0f7a 9065 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9066 if (IS_NOT_E1HMF_STAT(bp, i))
9067 continue;
9068 strcpy(buf + j*ETH_GSTRING_LEN,
9069 bnx2x_stats_arr[i].string);
9070 j++;
9071 }
9072 break;
9073
9074 case ETH_SS_TEST:
9075 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9076 break;
9077 }
9078}
9079
9080static int bnx2x_get_stats_count(struct net_device *dev)
9081{
9082 struct bnx2x *bp = netdev_priv(dev);
9083 int i, num_stats = 0;
9084
9085 for (i = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9086 if (IS_NOT_E1HMF_STAT(bp, i))
9087 continue;
9088 num_stats++;
9089 }
9090 return num_stats;
9091}
9092
9093static void bnx2x_get_ethtool_stats(struct net_device *dev,
9094 struct ethtool_stats *stats, u64 *buf)
9095{
9096 struct bnx2x *bp = netdev_priv(dev);
9097 u32 *hw_stats = (u32 *)&bp->eth_stats;
9098 int i, j;
a2fbb9ea 9099
bb2a0f7a 9100 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9101 if (IS_NOT_E1HMF_STAT(bp, i))
a2fbb9ea 9102 continue;
9103
9104 if (bnx2x_stats_arr[i].size == 0) {
9105 /* skip this counter */
9106 buf[j] = 0;
9107 j++;
9108 continue;
9109 }
bb2a0f7a 9110 if (bnx2x_stats_arr[i].size == 4) {
a2fbb9ea 9111 /* 4-byte counter */
9112 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9113 j++;
9114 continue;
9115 }
9116 /* 8-byte counter */
9117 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9118 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9119 j++;
9120 }
9121}
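Every 8-byte counter in bp->eth_stats is kept as a pair of 32-bit words with the high word first, which is why the 8-byte branch reads offset and offset+1 and glues them together with HILO_U64. The combination is just:

#include <stdint.h>
#include <stdio.h>

#define HILO_U64(hi, lo) ((((uint64_t)(hi)) << 32) + (lo))

int main(void)
{
	/* {hi, lo} pair, the layout used by the *_hi/*_lo stats fields */
	uint32_t pair[2] = { 0x00000001, 0x80000000 };
	uint64_t v = HILO_U64(pair[0], pair[1]);

	printf("0x%llx\n", (unsigned long long)v);  /* 0x180000000 */
	return 0;
}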
9122
9123static int bnx2x_phys_id(struct net_device *dev, u32 data)
9124{
9125 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9126 int port = BP_PORT(bp);
9127 int i;
9128
9129 if (!netif_running(dev))
9130 return 0;
9131
9132 if (!bp->port.pmf)
9133 return 0;
9134
9135 if (data == 0)
9136 data = 2;
9137
9138 for (i = 0; i < (data * 2); i++) {
c18487ee 9139 if ((i % 2) == 0)
34f80b04 9140 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9141 bp->link_params.hw_led_mode,
9142 bp->link_params.chip_id);
9143 else
34f80b04 9144 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9145 bp->link_params.hw_led_mode,
9146 bp->link_params.chip_id);
9147
9148 msleep_interruptible(500);
9149 if (signal_pending(current))
9150 break;
9151 }
9152
c18487ee 9153 if (bp->link_vars.link_up)
34f80b04 9154 bnx2x_set_led(bp, port, LED_MODE_OPER,
9155 bp->link_vars.line_speed,
9156 bp->link_params.hw_led_mode,
9157 bp->link_params.chip_id);
9158
9159 return 0;
9160}
9161
9162static struct ethtool_ops bnx2x_ethtool_ops = {
9163 .get_settings = bnx2x_get_settings,
9164 .set_settings = bnx2x_set_settings,
9165 .get_drvinfo = bnx2x_get_drvinfo,
9166 .get_wol = bnx2x_get_wol,
9167 .set_wol = bnx2x_set_wol,
9168 .get_msglevel = bnx2x_get_msglevel,
9169 .set_msglevel = bnx2x_set_msglevel,
9170 .nway_reset = bnx2x_nway_reset,
9171 .get_link = ethtool_op_get_link,
9172 .get_eeprom_len = bnx2x_get_eeprom_len,
9173 .get_eeprom = bnx2x_get_eeprom,
9174 .set_eeprom = bnx2x_set_eeprom,
9175 .get_coalesce = bnx2x_get_coalesce,
9176 .set_coalesce = bnx2x_set_coalesce,
9177 .get_ringparam = bnx2x_get_ringparam,
9178 .set_ringparam = bnx2x_set_ringparam,
9179 .get_pauseparam = bnx2x_get_pauseparam,
9180 .set_pauseparam = bnx2x_set_pauseparam,
9181 .get_rx_csum = bnx2x_get_rx_csum,
9182 .set_rx_csum = bnx2x_set_rx_csum,
9183 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9184 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9185 .set_flags = bnx2x_set_flags,
9186 .get_flags = ethtool_op_get_flags,
9187 .get_sg = ethtool_op_get_sg,
9188 .set_sg = ethtool_op_set_sg,
9189 .get_tso = ethtool_op_get_tso,
9190 .set_tso = bnx2x_set_tso,
9191 .self_test_count = bnx2x_self_test_count,
9192 .self_test = bnx2x_self_test,
9193 .get_strings = bnx2x_get_strings,
9194 .phys_id = bnx2x_phys_id,
9195 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9196 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9197};
9198
9199/* end of ethtool_ops */
9200
9201/****************************************************************************
9202* General service functions
9203****************************************************************************/
9204
9205static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9206{
9207 u16 pmcsr;
9208
9209 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9210
9211 switch (state) {
9212 case PCI_D0:
34f80b04 9213 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9214 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9215 PCI_PM_CTRL_PME_STATUS));
9216
9217 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9218 /* delay required during transition out of D3hot */
9219 msleep(20);
34f80b04 9220 break;
a2fbb9ea 9221
9222 case PCI_D3hot:
9223 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9224 pmcsr |= 3;
a2fbb9ea 9225
9226 if (bp->wol)
9227 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9228
9229 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9230 pmcsr);
a2fbb9ea 9231
9232 /* No more memory access after this point until
9233 * device is brought back to D0.
9234 */
9235 break;
9236
9237 default:
9238 return -EINVAL;
9239 }
9240 return 0;
9241}
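Both branches above are ordinary PMCSR field updates: the D-state lives in the low two bits of the PM control register and the PME bits are folded in on top. With the generic PCI constants the D3hot path reduces to the following sketch (the constant values are the standard PCI PM register definitions, shown for illustration):

#include <stdint.h>
#include <stdio.h>

#define PCI_PM_CTRL_STATE_MASK  0x0003  /* D-state field, bits 1:0 */
#define PCI_PM_CTRL_PME_ENABLE  0x0100

int main(void)
{
	uint16_t pmcsr = 0x0008;       /* whatever was read from PM_CTRL */
	int wol = 1;

	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pmcsr |= 3;                    /* request D3hot */
	if (wol)
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
	printf("PMCSR to write: 0x%04x\n", pmcsr);   /* 0x010b */
	return 0;
}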
9242
9243/*
9244 * net_device service functions
9245 */
9246
9247static int bnx2x_poll(struct napi_struct *napi, int budget)
9248{
9249 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9250 napi);
9251 struct bnx2x *bp = fp->bp;
9252 int work_done = 0;
9253
9254#ifdef BNX2X_STOP_ON_ERROR
9255 if (unlikely(bp->panic))
34f80b04 9256 goto poll_panic;
9257#endif
9258
9259 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9260 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9261 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9262
9263 bnx2x_update_fpsb_idx(fp);
9264
da5a662a 9265 if (BNX2X_HAS_TX_WORK(fp))
9266 bnx2x_tx_int(fp, budget);
9267
da5a662a 9268 if (BNX2X_HAS_RX_WORK(fp))
9269 work_done = bnx2x_rx_int(fp, budget);
9270
da5a662a 9271 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9272
9273 /* must not complete if we consumed full budget */
da5a662a 9274 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9275
9276#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9277poll_panic:
9278#endif
9279 netif_rx_complete(bp->dev, napi);
9280
34f80b04 9281 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9282 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9283 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9284 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9285 }
9286 return work_done;
9287}
9288
9289
9290/* we split the first BD into headers and data BDs
9291 * to ease the pain of our fellow microcode engineers
9292 * we use one mapping for both BDs
9293 * So far this has only been observed to happen
9294 * in Other Operating Systems(TM)
9295 */
9296static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9297 struct bnx2x_fastpath *fp,
9298 struct eth_tx_bd **tx_bd, u16 hlen,
9299 u16 bd_prod, int nbd)
9300{
9301 struct eth_tx_bd *h_tx_bd = *tx_bd;
9302 struct eth_tx_bd *d_tx_bd;
9303 dma_addr_t mapping;
9304 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9305
9306 /* first fix first BD */
9307 h_tx_bd->nbd = cpu_to_le16(nbd);
9308 h_tx_bd->nbytes = cpu_to_le16(hlen);
9309
9310 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9311 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9312 h_tx_bd->addr_lo, h_tx_bd->nbd);
9313
9314 /* now get a new data BD
9315 * (after the pbd) and fill it */
9316 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9317 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9318
9319 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9320 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9321
9322 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9323 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9324 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9325 d_tx_bd->vlan = 0;
9326 /* this marks the BD as one that has no individual mapping
9327 * the FW ignores this flag in a BD not marked start
9328 */
9329 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9330 DP(NETIF_MSG_TX_QUEUED,
9331 "TSO split data size is %d (%x:%x)\n",
9332 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9333
9334 /* update tx_bd for marking the last BD flag */
9335 *tx_bd = d_tx_bd;
9336
9337 return bd_prod;
9338}
9339
9340static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9341{
9342 if (fix > 0)
9343 csum = (u16) ~csum_fold(csum_sub(csum,
9344 csum_partial(t_header - fix, fix, 0)));
9345
9346 else if (fix < 0)
9347 csum = (u16) ~csum_fold(csum_add(csum,
9348 csum_partial(t_header, -fix, 0)));
9349
9350 return swab16(csum);
9351}
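bnx2x_csum_fix() compensates for hardware that started its ones'-complement sum at a different offset than the transport header: the partial sum over the extra (or missing) bytes is subtracted (or added) and the result folded back to 16 bits. The underlying identity is easy to check in user space; a sketch follows (an even fix keeps the byte pairing, which the real code relies on as well):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint16_t ocsum(const uint8_t *p, size_t len)   /* ones'-complement sum */
{
	uint32_t s = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		s += ((uint32_t)p[i] << 8) | p[i + 1];
	if (len & 1)
		s += (uint32_t)p[len - 1] << 8;
	while (s >> 16)
		s = (s & 0xffff) + (s >> 16);
	return (uint16_t)s;
}

static uint16_t ocsub(uint16_t a, uint16_t b)         /* a - b, end-around */
{
	uint32_t s = a + (uint16_t)~b;

	while (s >> 16)
		s = (s & 0xffff) + (s >> 16);
	return (uint16_t)s;
}

int main(void)
{
	uint8_t buf[64];
	size_t i;
	int fix = 4;                   /* hw started 4 bytes too early */

	for (i = 0; i < sizeof(buf); i++)
		buf[i] = (uint8_t)(i * 7 + 1);

	uint16_t hw    = ocsum(buf, sizeof(buf));            /* what hw gave */
	uint16_t want  = ocsum(buf + fix, sizeof(buf) - fix);
	uint16_t fixed = ocsub(hw, ocsum(buf, fix));         /* the "fix" */

	printf("%s\n", fixed == want ? "match" : "mismatch");
	return fixed != want;
}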
9352
9353static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9354{
9355 u32 rc;
9356
9357 if (skb->ip_summed != CHECKSUM_PARTIAL)
9358 rc = XMIT_PLAIN;
9359
9360 else {
9361 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9362 rc = XMIT_CSUM_V6;
9363 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9364 rc |= XMIT_CSUM_TCP;
9365
9366 } else {
9367 rc = XMIT_CSUM_V4;
9368 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9369 rc |= XMIT_CSUM_TCP;
9370 }
9371 }
9372
9373 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9374 rc |= XMIT_GSO_V4;
9375
9376 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9377 rc |= XMIT_GSO_V6;
9378
9379 return rc;
9380}
9381
9382/* check if packet requires linearization (packet is too fragmented) */
9383static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9384 u32 xmit_type)
9385{
9386 int to_copy = 0;
9387 int hlen = 0;
9388 int first_bd_sz = 0;
9389
9390 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9391 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9392
9393 if (xmit_type & XMIT_GSO) {
9394 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9395 /* Check if LSO packet needs to be copied:
9396 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9397 int wnd_size = MAX_FETCH_BD - 3;
9398 			/* Number of windows to check */
9399 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9400 int wnd_idx = 0;
9401 int frag_idx = 0;
9402 u32 wnd_sum = 0;
9403
9404 /* Headers length */
9405 hlen = (int)(skb_transport_header(skb) - skb->data) +
9406 tcp_hdrlen(skb);
9407
9408 /* Amount of data (w/o headers) on linear part of SKB*/
9409 first_bd_sz = skb_headlen(skb) - hlen;
9410
9411 wnd_sum = first_bd_sz;
9412
9413 /* Calculate the first sum - it's special */
9414 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9415 wnd_sum +=
9416 skb_shinfo(skb)->frags[frag_idx].size;
9417
9418 /* If there was data on linear skb data - check it */
9419 if (first_bd_sz > 0) {
9420 if (unlikely(wnd_sum < lso_mss)) {
9421 to_copy = 1;
9422 goto exit_lbl;
9423 }
9424
9425 wnd_sum -= first_bd_sz;
9426 }
9427
9428 /* Others are easier: run through the frag list and
9429 check all windows */
9430 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9431 wnd_sum +=
9432 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9433
9434 if (unlikely(wnd_sum < lso_mss)) {
9435 to_copy = 1;
9436 break;
9437 }
9438 wnd_sum -=
9439 skb_shinfo(skb)->frags[wnd_idx].size;
9440 }
9441
9442 } else {
9443 			/* a non-LSO packet that is too fragmented
9444 			   should always be linearized */
9445 to_copy = 1;
9446 }
9447 }
9448
9449exit_lbl:
9450 if (unlikely(to_copy))
9451 DP(NETIF_MSG_TX_QUEUED,
9452 "Linearization IS REQUIRED for %s packet. "
9453 "num_frags %d hlen %d first_bd_sz %d\n",
9454 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9455 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9456
9457 return to_copy;
9458}
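The firmware can fetch at most MAX_FETCH_BD descriptors per transmitted frame, so for LSO the driver slides a window of wnd_size buffers across the frag list and demands that every window cover at least one MSS; if any window falls short, a single on-wire packet could span too many BDs and the skb must be linearized. The window scan itself, extracted into a small standalone checker that mirrors the loop above (a sketch):

#include <stdio.h>

/* Return 1 if some window of wnd_size consecutive buffers (the linear
 * part counts as buffer 0) sums to less than one MSS. */
static int needs_linearization(const int *frag, int nfrags,
			       int first_bd_sz, int wnd_size, int mss)
{
	int wnd_sum = first_bd_sz;
	int i;

	for (i = 0; i < wnd_size - 1 && i < nfrags; i++)
		wnd_sum += frag[i];

	if (first_bd_sz > 0) {
		if (wnd_sum < mss)
			return 1;
		wnd_sum -= first_bd_sz;
	}
	for (i = 0; i <= nfrags - wnd_size; i++) {
		wnd_sum += frag[i + wnd_size - 1];
		if (wnd_sum < mss)
			return 1;
		wnd_sum -= frag[i];
	}
	return 0;
}

int main(void)
{
	int frags[] = { 200, 200, 200, 200, 4096 };

	/* linear 100 + 200 + 200 = 500 < MSS 1460 -> must linearize */
	printf("%d\n", needs_linearization(frags, 5, 100, 3, 1460));
	return 0;
}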
9459
9460/* called with netif_tx_lock
a2fbb9ea 9461 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 9462 * netif_wake_queue()
9463 */
9464static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9465{
9466 struct bnx2x *bp = netdev_priv(dev);
9467 struct bnx2x_fastpath *fp;
9468 struct sw_tx_bd *tx_buf;
9469 struct eth_tx_bd *tx_bd;
9470 struct eth_tx_parse_bd *pbd = NULL;
9471 u16 pkt_prod, bd_prod;
755735eb 9472 int nbd, fp_index;
a2fbb9ea 9473 dma_addr_t mapping;
9474 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9475 int vlan_off = (bp->e1hov ? 4 : 0);
9476 int i;
9477 u8 hlen = 0;
9478
9479#ifdef BNX2X_STOP_ON_ERROR
9480 if (unlikely(bp->panic))
9481 return NETDEV_TX_BUSY;
9482#endif
9483
755735eb 9484 fp_index = (smp_processor_id() % bp->num_queues);
a2fbb9ea 9485 fp = &bp->fp[fp_index];
755735eb 9486
9487 	if (unlikely(bnx2x_tx_avail(fp) <
9488 (skb_shinfo(skb)->nr_frags + 3))) {
bb2a0f7a 9489 		bp->eth_stats.driver_xoff++;
9490 netif_stop_queue(dev);
9491 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9492 return NETDEV_TX_BUSY;
9493 }
9494
9495 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9496 " gso type %x xmit_type %x\n",
9497 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9498 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9499
9500 	/* First, check if we need to linearize the skb
9501 (due to FW restrictions) */
9502 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9503 /* Statistics of linearization */
9504 bp->lin_cnt++;
9505 if (skb_linearize(skb) != 0) {
9506 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9507 "silently dropping this SKB\n");
9508 dev_kfree_skb_any(skb);
da5a662a 9509 return NETDEV_TX_OK;
9510 }
9511 }
9512
a2fbb9ea 9513 /*
755735eb 9514 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 9515 then for TSO or xsum we have a parsing info BD,
755735eb 9516 and only then we have the rest of the TSO BDs.
9517 (don't forget to mark the last one as last,
9518 and to unmap only AFTER you write to the BD ...)
755735eb 9519 	   And above all, all pbd sizes are in words - NOT DWORDS!
9520 */
9521
9522 pkt_prod = fp->tx_pkt_prod++;
755735eb 9523 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 9524
755735eb 9525 /* get a tx_buf and first BD */
9526 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9527 tx_bd = &fp->tx_desc_ring[bd_prod];
9528
9529 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9530 tx_bd->general_data = (UNICAST_ADDRESS <<
9531 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9532 tx_bd->general_data |= 1; /* header nbd */
9533
9534 /* remember the first BD of the packet */
9535 tx_buf->first_bd = fp->tx_bd_prod;
9536 tx_buf->skb = skb;
9537
9538 DP(NETIF_MSG_TX_QUEUED,
9539 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9540 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9541
9542 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9543 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9544 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9545 vlan_off += 4;
9546 } else
9547 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 9548
755735eb 9549 if (xmit_type) {
a2fbb9ea 9550
755735eb 9551 /* turn on parsing and get a BD */
9552 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9553 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9554
9555 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9556 }
9557
9558 if (xmit_type & XMIT_CSUM) {
9559 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9560
9561 /* for now NS flag is not used in Linux */
755735eb 9562 pbd->global_data = (hlen |
96fc1784 9563 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 9564 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 9565
9566 pbd->ip_hlen = (skb_transport_header(skb) -
9567 skb_network_header(skb)) / 2;
9568
9569 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 9570
9571 pbd->total_hlen = cpu_to_le16(hlen);
9572 hlen = hlen*2 - vlan_off;
a2fbb9ea 9573
9574 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9575
9576 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 9577 tx_bd->bd_flags.as_bitfield |=
9578 ETH_TX_BD_FLAGS_IP_CSUM;
9579 else
9580 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9581
9582 if (xmit_type & XMIT_CSUM_TCP) {
9583 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9584
9585 } else {
9586 s8 fix = SKB_CS_OFF(skb); /* signed! */
9587
a2fbb9ea 9588 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 9589 pbd->cs_offset = fix / 2;
a2fbb9ea 9590
9591 DP(NETIF_MSG_TX_QUEUED,
9592 "hlen %d offset %d fix %d csum before fix %x\n",
9593 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9594 SKB_CS(skb));
9595
9596 /* HW bug: fixup the CSUM */
9597 pbd->tcp_pseudo_csum =
9598 bnx2x_csum_fix(skb_transport_header(skb),
9599 SKB_CS(skb), fix);
9600
9601 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9602 pbd->tcp_pseudo_csum);
9603 }
9604 }
9605
9606 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 9607 skb_headlen(skb), PCI_DMA_TODEVICE);
9608
9609 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9610 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9611 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9612 tx_bd->nbd = cpu_to_le16(nbd);
9613 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9614
9615 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9616 " nbytes %d flags %x vlan %x\n",
9617 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9618 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9619 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 9620
755735eb 9621 if (xmit_type & XMIT_GSO) {
9622
9623 DP(NETIF_MSG_TX_QUEUED,
9624 "TSO packet len %d hlen %d total len %d tso size %d\n",
9625 skb->len, hlen, skb_headlen(skb),
9626 skb_shinfo(skb)->gso_size);
9627
9628 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9629
9630 if (unlikely(skb_headlen(skb) > hlen))
9631 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9632 bd_prod, ++nbd);
9633
9634 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9635 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9636 pbd->tcp_flags = pbd_tcp_flags(skb);
9637
9638 if (xmit_type & XMIT_GSO_V4) {
9639 pbd->ip_id = swab16(ip_hdr(skb)->id);
9640 pbd->tcp_pseudo_csum =
9641 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9642 ip_hdr(skb)->daddr,
9643 0, IPPROTO_TCP, 0));
9644
9645 } else
9646 pbd->tcp_pseudo_csum =
9647 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9648 &ipv6_hdr(skb)->daddr,
9649 0, IPPROTO_TCP, 0));
9650
9651 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9652 }
9653
9654 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9655 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 9656
9657 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9658 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 9659
9660 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9661 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 9662
9663 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9664 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9665 tx_bd->nbytes = cpu_to_le16(frag->size);
9666 tx_bd->vlan = cpu_to_le16(pkt_prod);
9667 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 9668
9669 DP(NETIF_MSG_TX_QUEUED,
9670 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9671 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9672 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9673 }
9674
755735eb 9675 /* now at last mark the BD as the last BD */
9676 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9677
9678 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9679 tx_bd, tx_bd->bd_flags.as_bitfield);
9680
9681 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9682
755735eb 9683 /* now send a tx doorbell, counting the next BD
9684 * if the packet contains or ends with it
9685 */
9686 if (TX_BD_POFF(bd_prod) < nbd)
9687 nbd++;
9688
9689 if (pbd)
9690 DP(NETIF_MSG_TX_QUEUED,
9691 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9692 " tcp_flags %x xsum %x seq %u hlen %u\n",
9693 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9694 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 9695 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 9696
755735eb 9697 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 9698
9699 fp->hw_tx_prods->bds_prod =
9700 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 9701 mb(); /* FW restriction: must not reorder writing nbd and packets */
9702 fp->hw_tx_prods->packets_prod =
9703 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 9704 DOORBELL(bp, FP_IDX(fp), 0);
9705
9706 mmiowb();
9707
755735eb 9708 fp->tx_bd_prod += nbd;
9709 dev->trans_start = jiffies;
9710
9711 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9712 netif_stop_queue(dev);
bb2a0f7a 9713 bp->eth_stats.driver_xoff++;
9714 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9715 netif_wake_queue(dev);
9716 }
9717 fp->tx_pkt++;
9718
9719 return NETDEV_TX_OK;
9720}
9721
bb2a0f7a 9722/* called with rtnl_lock */
9723static int bnx2x_open(struct net_device *dev)
9724{
9725 struct bnx2x *bp = netdev_priv(dev);
9726
9727 bnx2x_set_power_state(bp, PCI_D0);
9728
bb2a0f7a 9729 return bnx2x_nic_load(bp, LOAD_OPEN);
9730}
9731
bb2a0f7a 9732/* called with rtnl_lock */
9733static int bnx2x_close(struct net_device *dev)
9734{
9735 struct bnx2x *bp = netdev_priv(dev);
9736
9737 /* Unload the driver, release IRQs */
9738 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9739 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9740 if (!CHIP_REV_IS_SLOW(bp))
9741 bnx2x_set_power_state(bp, PCI_D3hot);
9742
9743 return 0;
9744}
9745
9746/* called with netif_tx_lock from set_multicast */
9747static void bnx2x_set_rx_mode(struct net_device *dev)
9748{
9749 struct bnx2x *bp = netdev_priv(dev);
9750 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9751 int port = BP_PORT(bp);
9752
9753 if (bp->state != BNX2X_STATE_OPEN) {
9754 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9755 return;
9756 }
9757
9758 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9759
9760 if (dev->flags & IFF_PROMISC)
9761 rx_mode = BNX2X_RX_MODE_PROMISC;
9762
9763 else if ((dev->flags & IFF_ALLMULTI) ||
9764 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9765 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9766
9767 else { /* some multicasts */
9768 if (CHIP_IS_E1(bp)) {
9769 int i, old, offset;
9770 struct dev_mc_list *mclist;
9771 struct mac_configuration_cmd *config =
9772 bnx2x_sp(bp, mcast_config);
9773
9774 for (i = 0, mclist = dev->mc_list;
9775 mclist && (i < dev->mc_count);
9776 i++, mclist = mclist->next) {
9777
9778 config->config_table[i].
9779 cam_entry.msb_mac_addr =
9780 swab16(*(u16 *)&mclist->dmi_addr[0]);
9781 config->config_table[i].
9782 cam_entry.middle_mac_addr =
9783 swab16(*(u16 *)&mclist->dmi_addr[2]);
9784 config->config_table[i].
9785 cam_entry.lsb_mac_addr =
9786 swab16(*(u16 *)&mclist->dmi_addr[4]);
9787 config->config_table[i].cam_entry.flags =
9788 cpu_to_le16(port);
9789 config->config_table[i].
9790 target_table_entry.flags = 0;
9791 config->config_table[i].
9792 target_table_entry.client_id = 0;
9793 config->config_table[i].
9794 target_table_entry.vlan_id = 0;
9795
9796 DP(NETIF_MSG_IFUP,
9797 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9798 config->config_table[i].
9799 cam_entry.msb_mac_addr,
9800 config->config_table[i].
9801 cam_entry.middle_mac_addr,
9802 config->config_table[i].
9803 cam_entry.lsb_mac_addr);
9804 }
9805 old = config->hdr.length_6b;
9806 if (old > i) {
9807 for (; i < old; i++) {
9808 if (CAM_IS_INVALID(config->
9809 config_table[i])) {
9810 i--; /* already invalidated */
9811 break;
9812 }
9813 /* invalidate */
9814 CAM_INVALIDATE(config->
9815 config_table[i]);
9816 }
9817 }
9818
9819 if (CHIP_REV_IS_SLOW(bp))
9820 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9821 else
9822 offset = BNX2X_MAX_MULTICAST*(1 + port);
9823
9824 config->hdr.length_6b = i;
9825 config->hdr.offset = offset;
9826 config->hdr.client_id = BP_CL_ID(bp);
9827 config->hdr.reserved1 = 0;
9828
9829 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9830 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9831 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9832 0);
9833 } else { /* E1H */
9834 /* Accept one or more multicasts */
9835 struct dev_mc_list *mclist;
9836 u32 mc_filter[MC_HASH_SIZE];
9837 u32 crc, bit, regidx;
9838 int i;
9839
9840 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9841
9842 for (i = 0, mclist = dev->mc_list;
9843 mclist && (i < dev->mc_count);
9844 i++, mclist = mclist->next) {
9845
9846 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9847 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9848 mclist->dmi_addr[0], mclist->dmi_addr[1],
9849 mclist->dmi_addr[2], mclist->dmi_addr[3],
9850 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9851
9852 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9853 bit = (crc >> 24) & 0xff;
9854 regidx = bit >> 5;
9855 bit &= 0x1f;
9856 mc_filter[regidx] |= (1 << bit);
9857 }
9858
9859 for (i = 0; i < MC_HASH_SIZE; i++)
9860 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9861 mc_filter[i]);
9862 }
9863 }
9864
9865 bp->rx_mode = rx_mode;
9866 bnx2x_set_storm_rx_mode(bp);
9867}
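On E1H the multicast filter is a 256-bit hash: the top byte of a CRC32C over the MAC address picks one bit out of the eight 32-bit MC_HASH registers. The bit selection is reproducible in isolation (the bitwise CRC32C below is intended to match the kernel's crc32c_le() for a zero seed; the MAC is just an example):

#include <stdint.h>
#include <stdio.h>

static uint32_t crc32c_le(uint32_t crc, const uint8_t *p, unsigned int len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x82f63b78 : 0);
	}
	return crc;
}

int main(void)
{
	uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t mc_filter[8] = { 0 };
	uint32_t crc = crc32c_le(0, mac, 6);
	uint32_t bit = (crc >> 24) & 0xff;
	uint32_t regidx = bit >> 5;     /* which MC_HASH register (0..7) */

	bit &= 0x1f;                    /* which bit inside it */
	mc_filter[regidx] |= 1u << bit;
	printf("reg %u bit %u\n", regidx, bit);
	return 0;
}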
9868
9869/* called with rtnl_lock */
9870static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9871{
9872 struct sockaddr *addr = p;
9873 struct bnx2x *bp = netdev_priv(dev);
9874
34f80b04 9875 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9876 return -EINVAL;
9877
9878 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9879 if (netif_running(dev)) {
9880 if (CHIP_IS_E1(bp))
3101c2bc 9881 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 9882 else
3101c2bc 9883 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 9884 }
9885
9886 return 0;
9887}
9888
c18487ee 9889/* called with rtnl_lock */
9890static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9891{
9892 struct mii_ioctl_data *data = if_mii(ifr);
9893 struct bnx2x *bp = netdev_priv(dev);
9894 int err;
9895
9896 switch (cmd) {
9897 case SIOCGMIIPHY:
34f80b04 9898 data->phy_id = bp->port.phy_addr;
a2fbb9ea 9899
c14423fe 9900 /* fallthrough */
c18487ee 9901
a2fbb9ea 9902 case SIOCGMIIREG: {
c18487ee 9903 u16 mii_regval;
a2fbb9ea 9904
9905 if (!netif_running(dev))
9906 return -EAGAIN;
a2fbb9ea 9907
9908 mutex_lock(&bp->port.phy_mutex);
9909 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9910 DEFAULT_PHY_DEV_ADDR,
9911 (data->reg_num & 0x1f), &mii_regval);
9912 data->val_out = mii_regval;
34f80b04 9913 mutex_unlock(&bp->port.phy_mutex);
9914 return err;
9915 }
9916
9917 case SIOCSMIIREG:
9918 if (!capable(CAP_NET_ADMIN))
9919 return -EPERM;
9920
9921 if (!netif_running(dev))
9922 return -EAGAIN;
9923
9924 mutex_lock(&bp->port.phy_mutex);
9925 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9926 DEFAULT_PHY_DEV_ADDR,
9927 (data->reg_num & 0x1f), data->val_in);
34f80b04 9928 mutex_unlock(&bp->port.phy_mutex);
9929 return err;
9930
9931 default:
9932 /* do nothing */
9933 break;
9934 }
9935
9936 return -EOPNOTSUPP;
9937}
9938
34f80b04 9939/* called with rtnl_lock */
9940static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9941{
9942 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9943 int rc = 0;
9944
9945 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9946 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9947 return -EINVAL;
9948
9949 /* This does not race with packet allocation
c14423fe 9950 * because the actual alloc size is
9951 * only updated as part of load
9952 */
9953 dev->mtu = new_mtu;
9954
9955 if (netif_running(dev)) {
9956 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9957 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 9958 }
9959
9960 return rc;
9961}
9962
9963static void bnx2x_tx_timeout(struct net_device *dev)
9964{
9965 struct bnx2x *bp = netdev_priv(dev);
9966
9967#ifdef BNX2X_STOP_ON_ERROR
9968 if (!bp->panic)
9969 bnx2x_panic();
9970#endif
9971 	/* This allows the netif to be shut down gracefully before resetting */
9972 schedule_work(&bp->reset_task);
9973}
9974
9975#ifdef BCM_VLAN
34f80b04 9976/* called with rtnl_lock */
9977static void bnx2x_vlan_rx_register(struct net_device *dev,
9978 struct vlan_group *vlgrp)
9979{
9980 struct bnx2x *bp = netdev_priv(dev);
9981
9982 bp->vlgrp = vlgrp;
9983 if (netif_running(dev))
49d66772 9984 bnx2x_set_client_config(bp);
a2fbb9ea 9985}
34f80b04 9986
9987#endif
9988
9989#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9990static void poll_bnx2x(struct net_device *dev)
9991{
9992 struct bnx2x *bp = netdev_priv(dev);
9993
9994 disable_irq(bp->pdev->irq);
9995 bnx2x_interrupt(bp->pdev->irq, dev);
9996 enable_irq(bp->pdev->irq);
9997}
9998#endif
9999
10000static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10001 struct net_device *dev)
10002{
10003 struct bnx2x *bp;
10004 int rc;
10005
10006 SET_NETDEV_DEV(dev, &pdev->dev);
10007 bp = netdev_priv(dev);
10008
10009 bp->dev = dev;
10010 bp->pdev = pdev;
a2fbb9ea 10011 bp->flags = 0;
34f80b04 10012 bp->func = PCI_FUNC(pdev->devfn);
10013
10014 rc = pci_enable_device(pdev);
10015 if (rc) {
10016 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10017 goto err_out;
10018 }
10019
10020 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10021 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10022 " aborting\n");
10023 rc = -ENODEV;
10024 goto err_out_disable;
10025 }
10026
10027 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10028 printk(KERN_ERR PFX "Cannot find second PCI device"
10029 " base address, aborting\n");
10030 rc = -ENODEV;
10031 goto err_out_disable;
10032 }
10033
10034 if (atomic_read(&pdev->enable_cnt) == 1) {
10035 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10036 if (rc) {
10037 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10038 " aborting\n");
10039 goto err_out_disable;
10040 }
a2fbb9ea 10041
10042 pci_set_master(pdev);
10043 pci_save_state(pdev);
10044 }
10045
10046 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10047 if (bp->pm_cap == 0) {
10048 printk(KERN_ERR PFX "Cannot find power management"
10049 " capability, aborting\n");
10050 rc = -EIO;
10051 goto err_out_release;
10052 }
10053
10054 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10055 if (bp->pcie_cap == 0) {
10056 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10057 " aborting\n");
10058 rc = -EIO;
10059 goto err_out_release;
10060 }
10061
10062 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10063 bp->flags |= USING_DAC_FLAG;
10064 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10065 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10066 " failed, aborting\n");
10067 rc = -EIO;
10068 goto err_out_release;
10069 }
10070
10071 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10072 printk(KERN_ERR PFX "System does not support DMA,"
10073 " aborting\n");
10074 rc = -EIO;
10075 goto err_out_release;
10076 }
10077
10078 dev->mem_start = pci_resource_start(pdev, 0);
10079 dev->base_addr = dev->mem_start;
10080 dev->mem_end = pci_resource_end(pdev, 0);
10081
10082 dev->irq = pdev->irq;
10083
10084 bp->regview = ioremap_nocache(dev->base_addr,
10085 pci_resource_len(pdev, 0));
10086 if (!bp->regview) {
10087 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10088 rc = -ENOMEM;
10089 goto err_out_release;
10090 }
10091
10092 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10093 min_t(u64, BNX2X_DB_SIZE,
10094 pci_resource_len(pdev, 2)));
10095 if (!bp->doorbells) {
10096 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10097 rc = -ENOMEM;
10098 goto err_out_unmap;
10099 }
10100
10101 bnx2x_set_power_state(bp, PCI_D0);
10102
10103 /* clean indirect addresses */
10104 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10105 PCICFG_VENDOR_ID_OFFSET);
10106 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10107 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10108 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10109 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10110
10111 dev->hard_start_xmit = bnx2x_start_xmit;
10112 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10113
10114 dev->ethtool_ops = &bnx2x_ethtool_ops;
10115 dev->open = bnx2x_open;
10116 dev->stop = bnx2x_close;
10117 dev->set_multicast_list = bnx2x_set_rx_mode;
10118 dev->set_mac_address = bnx2x_change_mac_addr;
10119 dev->do_ioctl = bnx2x_ioctl;
10120 dev->change_mtu = bnx2x_change_mtu;
10121 dev->tx_timeout = bnx2x_tx_timeout;
10122#ifdef BCM_VLAN
10123 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10124#endif
10125#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10126 dev->poll_controller = poll_bnx2x;
10127#endif
10128 dev->features |= NETIF_F_SG;
10129 dev->features |= NETIF_F_HW_CSUM;
10130 if (bp->flags & USING_DAC_FLAG)
10131 dev->features |= NETIF_F_HIGHDMA;
10132#ifdef BCM_VLAN
10133 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10134#endif
10135 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10136 dev->features |= NETIF_F_TSO6;
10137
10138 return 0;
10139
10140err_out_unmap:
10141 if (bp->regview) {
10142 iounmap(bp->regview);
10143 bp->regview = NULL;
10144 }
10145 if (bp->doorbells) {
10146 iounmap(bp->doorbells);
10147 bp->doorbells = NULL;
10148 }
10149
10150err_out_release:
10151 if (atomic_read(&pdev->enable_cnt) == 1)
10152 pci_release_regions(pdev);
10153
10154err_out_disable:
10155 pci_disable_device(pdev);
10156 pci_set_drvdata(pdev, NULL);
10157
10158err_out:
10159 return rc;
10160}
10161
10162static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10163{
10164 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10165
10166 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10167 return val;
10168}
10169
10170/* return value of 1=2.5GHz 2=5GHz */
10171static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10172{
10173 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10174
10175 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10176 return val;
10177}
10178
10179static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10180 const struct pci_device_id *ent)
10181{
10182 static int version_printed;
10183 struct net_device *dev = NULL;
10184 struct bnx2x *bp;
25047950 10185 int rc;
25047950 10186 DECLARE_MAC_BUF(mac);
10187
10188 if (version_printed++ == 0)
10189 printk(KERN_INFO "%s", version);
10190
10191 /* dev zeroed in init_etherdev */
10192 dev = alloc_etherdev(sizeof(*bp));
10193 if (!dev) {
10194 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10195 return -ENOMEM;
34f80b04 10196 }
10197
10198 netif_carrier_off(dev);
10199
10200 bp = netdev_priv(dev);
10201 bp->msglevel = debug;
10202
34f80b04 10203 rc = bnx2x_init_dev(pdev, dev);
10204 if (rc < 0) {
10205 free_netdev(dev);
10206 return rc;
10207 }
10208
10209 rc = register_netdev(dev);
10210 if (rc) {
c14423fe 10211 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04 10212 goto init_one_exit;
10213 }
10214
10215 pci_set_drvdata(pdev, dev);
10216
10217 rc = bnx2x_init_bp(bp);
10218 if (rc) {
10219 unregister_netdev(dev);
10220 goto init_one_exit;
10221 }
10222
10223 bp->common.name = board_info[ent->driver_data].name;
25047950 10224 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10225 " IRQ %d, ", dev->name, bp->common.name,
10226 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10227 bnx2x_get_pcie_width(bp),
10228 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10229 dev->base_addr, bp->pdev->irq);
10230 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
a2fbb9ea 10231 return 0;
10232
10233init_one_exit:
10234 if (bp->regview)
10235 iounmap(bp->regview);
10236
10237 if (bp->doorbells)
10238 iounmap(bp->doorbells);
10239
10240 free_netdev(dev);
10241
10242 if (atomic_read(&pdev->enable_cnt) == 1)
10243 pci_release_regions(pdev);
10244
10245 pci_disable_device(pdev);
10246 pci_set_drvdata(pdev, NULL);
10247
10248 return rc;
10249}
10250
10251static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10252{
10253 struct net_device *dev = pci_get_drvdata(pdev);
10254 struct bnx2x *bp;
10255
10256 if (!dev) {
10257 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10258 return;
10259 }
228241eb 10260 bp = netdev_priv(dev);
a2fbb9ea 10261
10262 unregister_netdev(dev);
10263
10264 if (bp->regview)
10265 iounmap(bp->regview);
10266
10267 if (bp->doorbells)
10268 iounmap(bp->doorbells);
10269
10270 free_netdev(dev);
10271
10272 if (atomic_read(&pdev->enable_cnt) == 1)
10273 pci_release_regions(pdev);
10274
10275 pci_disable_device(pdev);
10276 pci_set_drvdata(pdev, NULL);
10277}
10278
10279static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10280{
10281 struct net_device *dev = pci_get_drvdata(pdev);
10282 struct bnx2x *bp;
10283
10284 if (!dev) {
10285 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10286 return -ENODEV;
10287 }
10288 bp = netdev_priv(dev);
a2fbb9ea 10289
34f80b04 10290 rtnl_lock();
a2fbb9ea 10291
34f80b04 10292 pci_save_state(pdev);
228241eb 10293
10294 if (!netif_running(dev)) {
10295 rtnl_unlock();
10296 return 0;
10297 }
10298
10299 netif_device_detach(dev);
a2fbb9ea 10300
da5a662a 10301 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 10302
a2fbb9ea 10303 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 10304
10305 rtnl_unlock();
10306
10307 return 0;
10308}
10309
10310static int bnx2x_resume(struct pci_dev *pdev)
10311{
10312 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 10313 struct bnx2x *bp;
10314 int rc;
10315
10316 if (!dev) {
10317 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10318 return -ENODEV;
10319 }
228241eb 10320 bp = netdev_priv(dev);
a2fbb9ea 10321
10322 rtnl_lock();
10323
228241eb 10324 pci_restore_state(pdev);
10325
10326 if (!netif_running(dev)) {
10327 rtnl_unlock();
10328 return 0;
10329 }
10330
10331 bnx2x_set_power_state(bp, PCI_D0);
10332 netif_device_attach(dev);
10333
da5a662a 10334 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 10335
10336 rtnl_unlock();
10337
10338 return rc;
10339}
10340
10341/**
10342 * bnx2x_io_error_detected - called when PCI error is detected
10343 * @pdev: Pointer to PCI device
10344 * @state: The current pci connection state
10345 *
10346 * This function is called after a PCI bus error affecting
10347 * this device has been detected.
10348 */
10349static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10350 pci_channel_state_t state)
10351{
10352 struct net_device *dev = pci_get_drvdata(pdev);
10353 struct bnx2x *bp = netdev_priv(dev);
10354
10355 rtnl_lock();
10356
10357 netif_device_detach(dev);
10358
10359 if (netif_running(dev))
10360 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10361
10362 pci_disable_device(pdev);
10363
10364 rtnl_unlock();
10365
10366 /* Request a slot reset */
10367 return PCI_ERS_RESULT_NEED_RESET;
10368}
10369
10370/**
10371 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10372 * @pdev: Pointer to PCI device
10373 *
10374 * Restart the card from scratch, as if from a cold-boot.
10375 */
10376static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10377{
10378 struct net_device *dev = pci_get_drvdata(pdev);
10379 struct bnx2x *bp = netdev_priv(dev);
10380
10381 rtnl_lock();
10382
10383 if (pci_enable_device(pdev)) {
10384 dev_err(&pdev->dev,
10385 "Cannot re-enable PCI device after reset\n");
10386 rtnl_unlock();
10387 return PCI_ERS_RESULT_DISCONNECT;
10388 }
10389
10390 pci_set_master(pdev);
10391 pci_restore_state(pdev);
10392
10393 if (netif_running(dev))
10394 bnx2x_set_power_state(bp, PCI_D0);
10395
10396 rtnl_unlock();
10397
10398 return PCI_ERS_RESULT_RECOVERED;
10399}
10400
10401/**
10402 * bnx2x_io_resume - called when traffic can start flowing again
10403 * @pdev: Pointer to PCI device
10404 *
10405 * This callback is called when the error recovery driver tells us that
10406 * its OK to resume normal operation.
10407 */
10408static void bnx2x_io_resume(struct pci_dev *pdev)
10409{
10410 struct net_device *dev = pci_get_drvdata(pdev);
10411 struct bnx2x *bp = netdev_priv(dev);
10412
10413 rtnl_lock();
10414
10415 if (netif_running(dev))
10416 bnx2x_nic_load(bp, LOAD_OPEN);
10417
10418 netif_device_attach(dev);
10419
10420 rtnl_unlock();
10421}
10422
10423static struct pci_error_handlers bnx2x_err_handler = {
10424 .error_detected = bnx2x_io_error_detected,
10425 .slot_reset = bnx2x_io_slot_reset,
10426 .resume = bnx2x_io_resume,
10427};
10428
a2fbb9ea 10429static struct pci_driver bnx2x_pci_driver = {
10430 .name = DRV_MODULE_NAME,
10431 .id_table = bnx2x_pci_tbl,
10432 .probe = bnx2x_init_one,
10433 .remove = __devexit_p(bnx2x_remove_one),
10434 .suspend = bnx2x_suspend,
10435 .resume = bnx2x_resume,
10436 .err_handler = &bnx2x_err_handler,
10437};
10438
10439static int __init bnx2x_init(void)
10440{
10441 return pci_register_driver(&bnx2x_pci_driver);
10442}
10443
10444static void __exit bnx2x_cleanup(void)
10445{
10446 pci_unregister_driver(&bnx2x_pci_driver);
10447}
10448
10449module_init(bnx2x_init);
10450module_exit(bnx2x_cleanup);
10451