/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

54#include "bnx2x.h"
55#include "bnx2x_init.h"
94a78b79 56#include "bnx2x_init_ops.h"
0a64ea57 57#include "bnx2x_dump.h"
a2fbb9ea 58
56ed4351
VZ
59#define DRV_MODULE_VERSION "1.48.105-1"
60#define DRV_MODULE_RELDATE "2009/04/22"
34f80b04 61#define BNX2X_BC_VER 0x040200
a2fbb9ea 62
94a78b79
VZ
63#include <linux/firmware.h>
64#include "bnx2x_fw_file_hdr.h"
65/* FW files */
66#define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67#define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
68
34f80b04
EG
69/* Time in jiffies before concluding the transmitter is hung */
70#define TX_TIMEOUT (5*HZ)
a2fbb9ea 71
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

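/* DMAE helpers: bnx2x_write_dmae()/bnx2x_read_dmae() move blocks of dwords
 * between host memory and chip (GRC) address space using the on-chip DMA
 * engine.  A command is built in bp->init_dmae (serialized by
 * bp->dmae_mutex), posted with bnx2x_post_dmae(), and the slowpath
 * write-back word is then polled until the engine stores DMAE_COMP_VAL
 * there.  Before the engine is ready (!bp->dmae_ready) the copy falls back
 * to indirect register accesses.
 */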
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

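/* Dump the MCP (management firmware) trace buffer from the scratchpad.
 * The word at offset 0xf104 appears to mark the current write position
 * in a cyclic buffer: the dump prints from the mark to the buffer end
 * and then wraps from the buffer start back up to the mark.
 */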
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

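/* Interrupt (host coalescing block) configuration.  bnx2x_int_enable()
 * programs the HC_CONFIG register of the current port for whichever mode
 * was chosen at load time - MSI-X, MSI or legacy INTx - and, on E1H
 * chips, also initializes the leading/trailing edge registers so that
 * attentions reach this function.
 */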
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

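/* Tx completion.  Each transmitted packet occupies a chain of buffer
 * descriptors (BDs): the first BD carries the total BD count (nbd),
 * while an optional parse BD and TSO split-header BD have no DMA
 * mapping of their own and are simply skipped when unmapping.
 */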
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

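/* Reclaim completed Tx packets: walk from the driver's consumer up to the
 * consumer index reported in the status block, freeing each packet with
 * bnx2x_free_tx_pkt(), then wake the netdev queue if it was stopped and
 * enough ring space has become available.
 */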
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

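/* Rx buffer management.  The Rx path uses two kinds of buffers: regular
 * BDs backed by skbs (bnx2x_alloc_rx_skb()) and SGE ring entries backed
 * by whole pages (bnx2x_alloc_rx_sge()); the page-backed SGEs carry the
 * non-linear part of TPA-aggregated packets.
 */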
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

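/* TPA (transparent packet aggregation, this device's HW LRO flavor).
 * bnx2x_tpa_start() parks the partially filled skb in a per-queue bin
 * while the chip keeps aggregating; bnx2x_tpa_stop() later completes the
 * aggregated skb, attaching the SGE pages as fragments via
 * bnx2x_fill_frag_skb() and fixing up the IP checksum before handing the
 * skb to the stack.
 */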
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

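/* Main Rx processing loop, bounded by the caller's budget.  Each
 * completion queue entry is either a slowpath event (forwarded to
 * bnx2x_sp_event()) or a fastpath packet; small packets are copied into
 * a fresh skb, larger ones are unmapped and passed up directly, and the
 * new producer values are written back to the chip via
 * bnx2x_update_rx_prod().
 */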
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

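/* Interrupt service routines.  With MSI-X each fastpath queue owns a
 * vector serviced by bnx2x_msix_fp_int(), while bnx2x_interrupt() handles
 * the INTx/MSI case and demultiplexes the status bits itself; both only
 * acknowledge the interrupt and schedule NAPI, leaving the real work to
 * the poll routine.
 */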
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

c18487ee 1771/* end of fast path */
a2fbb9ea 1772
bb2a0f7a 1773static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1774
c18487ee
YR
1775/* Link */
1776
1777/*
1778 * General service functions
1779 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, polling every 5 ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
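
/*
 * Usage sketch for the two helpers above (illustrative, not original driver
 * text): they implement a hardware test-and-set lock.  Writing the resource
 * bit to hw_lock_control_reg + 4 asks the chip to set that bit atomically;
 * reading the base register back tells us whether this function actually won
 * it, and writing the bit to the base register clears it again.  A typical
 * caller brackets shared register access like this:
 *
 *	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO))
 *		return;		(* lock not won within 5 seconds *)
 *	... touch the shared GPIO registers ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */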

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
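
/*
 * Descriptive note on the register layout assumed above (illustrative):
 * MISC_REG_GPIO packs per-pin FLOAT/SET/CLR bit fields.  A pin is driven by
 * clearing its FLOAT bit and setting exactly one of SET or CLR, e.g.
 *
 *	gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
 *	gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
 *
 * while leaving FLOAT set tri-states the pin, which is how the
 * MISC_REGISTERS_GPIO_INPUT_HI_Z case turns a pin back into an input.
 */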

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur;
	   the 1.25 coefficient makes the threshold a little larger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
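
/*
 * A worked example of the arithmetic above (illustrative numbers): since
 * line_speed is in Mbps, i.e. bits per usec, a 10000 Mbps link gives
 * r_param = 10000/8 = 1250 bytes per usec, so rs_threshold becomes
 * (RS_PERIODIC_TIMEOUT_USEC * 1250 * 5) / 4 - the byte credit that would
 * accumulate in 1.25 timeout periods.  Likewise t_fair = T_FAIR_COEF / 10000,
 * which matches the "for 10G it is 1000usec" comment in the code.
 */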

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (i.e. not all min rates are zero)
		   and the current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
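
/*
 * Illustrative check of the quota above: vn_max_rate is in Mbps, i.e. bits
 * per usec, so (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8 is the number of
 * bytes this VN may transmit in one rate-shaping period.  For a hypothetical
 * 2500 Mbps VN and a 100 usec period that is 2500 * 100 / 8 = 31250 bytes
 * per period.
 */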

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs the port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	mmiowb();

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
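
/*
 * Usage sketch for bnx2x_sp_post() (illustrative; the statistics code later
 * in this file posts exactly such a ramrod): the 64-bit payload travels as
 * two u32 halves, and 'common' selects a common ramrod rather than a
 * per-client one:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 */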

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
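
/*
 * Descriptive note (not in the original source): the value returned by
 * bnx2x_update_dsb_idx() is a bitmask of which default status block indices
 * moved - bit 0 for attentions, then one bit each for the C, U, X and T
 * storm indices - which is why bnx2x_sp_task() below only tests
 * "status & 0x1" to decide whether HW attentions need handling.
 */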

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage.  Please contact Dell Support for assistance\n",
	       bp->dev->name);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
			    DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)
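
/*
 * Carry check for ADD_64 (illustrative): after s_lo += a_lo, the 32-bit sum
 * wrapped iff the result is smaller than a_lo.  E.g. adding a_lo = 2 to
 * s_lo = 0xffffffff leaves s_lo = 1 < 2, so exactly one carry propagates
 * into s_hi.
 */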

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
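
/*
 * Borrow check for DIFF_64 (illustrative): with m = {m_hi = 1, m_lo = 0} and
 * s = {s_hi = 0, s_lo = 1}, m_lo (0) < s_lo (1), so the macro "loans" one
 * from d_hi and computes d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff, i.e.
 * exactly 0x100000000 - 1, as expected.
 */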

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
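
/*
 * Descriptive note (not in the original source): on 32-bit kernels
 * bnx2x_hilo() deliberately returns only the low 32 bits of the 64-bit
 * statistic, since 'long' cannot hold both halves there; HILO_U64 is only
 * used when BITS_PER_LONG == 64.
 */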

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
3509
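/* Note on completion signalling in the DMAE chains built above: every
 * command except the last completes into a loader GO register
 * (dmae_reg_go_c[loader_idx]) with comp_val 1, which kicks off the next
 * command in the chain; only the final command completes into host memory
 * by writing DMAE_COMP_VAL to the stats_comp word that the update path
 * polls.  When there is only a single command, as below, it completes
 * directly into stats_comp.
 */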
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

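/* Statistics gathering has two sides: bnx2x_hw_stats_post() (defined
 * earlier in this file) kicks off the DMAE transfers prepared above
 * (MAC/NIG registers -> host buffers), while bnx2x_storm_stats_post()
 * requests that the storm firmware dump its per-client counters into the
 * fw_stats buffer.  Both results are folded together later in
 * bnx2x_stats_update().
 */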
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

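/* UPDATE_STAT64 and the UPDATE_EXTEND_* macros (see bnx2x.h) extend MAC
 * hardware counters, which are narrower than 64 bits and may wrap between
 * polls, into the 64-bit hi/lo pairs kept in host_port_stats: each macro
 * takes the difference from the previously read value and accumulates it
 * into the running total.
 */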
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

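/* The storm firmware bumps a per-client stats_counter each time it posts
 * a fresh snapshot.  A snapshot is trusted only when each storm's counter
 * is exactly one behind bp->stats_counter (the query the driver last
 * issued); otherwise the update below bails out and is retried on the
 * next timer tick (and bnx2x_stats_update() panics after repeated
 * failures).
 */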
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
		qstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
		qstats->total_bytes_received_lo =
		qstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

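/* Every estats counter is stored as a hi/lo pair of u32; bnx2x_hilo()
 * (defined earlier) collapses such a pair into the unsigned long used by
 * the netdev stats -- on 32-bit kernels only the low word survives.
 */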
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

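/* A DMAE pass counts as finished only once the last command in the chain
 * has written DMAE_COMP_VAL into the stats_comp word; if that value has
 * not shown up yet, the update below is simply skipped until the next
 * UPDATE event.
 */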
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

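/* The table above is indexed as [current state][event].  For example, an
 * UPDATE event (issued from the periodic timer) in state ENABLED runs
 * bnx2x_stats_update() and stays in ENABLED, while the same event in
 * DISABLED is a no-op; a STOP event in ENABLED runs bnx2x_stats_stop()
 * and moves to DISABLED.
 */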
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

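/* The periodic timer below doubles as the driver<->MCP heartbeat: each
 * tick writes an incremented drv_pulse sequence into the shared-memory
 * mailbox and compares it with the management firmware's mcp_pulse,
 * which is expected to trail by at most one sequence number.
 */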
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

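/* Each non-default status block has a USTORM section (rx CQ indices) and
 * a CSTORM section (tx CQ indices).  The setup below publishes the host
 * address of each section to the storm, tags it with the owning function,
 * disables host coalescing on all indices (re-enabled per index by
 * bnx2x_update_coalesce()), and finally acks the IGU to enable the
 * interrupt line.
 */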
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

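/* bp->rx_ticks/bp->tx_ticks hold the interrupt coalescing intervals in
 * usec (settable via ethtool); the HC timeout fields appear to be in
 * units of 12 usec, hence the /12 below.  A resulting timeout of 0
 * cannot be programmed, so that index is marked disabled instead.
 */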
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

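/* The TX BD ring below follows the same layout as the RX rings above:
 * a chain of BCM_PAGE_SIZE pages in which the final descriptor(s) of
 * each page are reserved as a "next page" pointer to the following page,
 * so the hardware can walk the ring without host intervention.
 */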
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = fp->sb_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}

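/* With the modulo mapping above, a device with e.g. 4 rx queues fills
 * the TSTORM_INDIRECTION_TABLE_SIZE-entry table as cl_id+0, cl_id+1,
 * cl_id+2, cl_id+3, cl_id+0, ..., so RSS hash buckets are spread evenly
 * across the rx queues.
 */
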
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}

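/* Example: with configured min rates of 0/25/0/50 for the four vnics,
 * the two zero entries are bumped to DEF_MIN_RATE and vn_weight_sum
 * becomes 2*DEF_MIN_RATE + 2500 + 5000 (rates are scaled by 100 above);
 * this sum later serves as the denominator when normalizing each vnic's
 * fairness weight.
 */
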
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}
	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5034 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5035
34f80b04
EG
5036 if (CHIP_IS_E1H(bp)) {
5037 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5038 IS_E1HMF(bp));
5039 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5040 IS_E1HMF(bp));
5041 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5042 IS_E1HMF(bp));
5043 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5044 IS_E1HMF(bp));
5045
7a9b2557
VZ
5046 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5047 bp->e1hov);
34f80b04
EG
5048 }
5049
4f40f2cb
EG
5050 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5051 max_agg_size =
5052 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5053 SGE_PAGE_SIZE * PAGES_PER_SGE),
5054 (u32)0xffff);
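/*
 * Worked example (assuming 4K SGE pages and PAGES_PER_SGE == 2, i.e.
 * 8192 bytes per SGE): MAX_SKB_FRAGS is at least 8, so the inner min()
 * picks 8 frags, 8 * 8192 = 65536, and the outer min() clamps that to
 * 0xffff so the value still fits the u16 register written below.
 */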
555f6c78 5055 for_each_rx_queue(bp, i) {
7a9b2557 5056 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5057
5058 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5059 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5060 U64_LO(fp->rx_comp_mapping));
5061 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5062 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5063 U64_HI(fp->rx_comp_mapping));
5064
7a9b2557 5065 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5066 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5067 max_agg_size);
5068 }
8a1c38d1 5069
1c06328c
EG
5070 /* dropless flow control */
5071 if (CHIP_IS_E1H(bp)) {
5072 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5073
5074 rx_pause.bd_thr_low = 250;
5075 rx_pause.cqe_thr_low = 250;
5076 rx_pause.cos = 1;
5077 rx_pause.sge_thr_low = 0;
5078 rx_pause.bd_thr_high = 350;
5079 rx_pause.cqe_thr_high = 350;
5080 rx_pause.sge_thr_high = 0;
5081
5082 for_each_rx_queue(bp, i) {
5083 struct bnx2x_fastpath *fp = &bp->fp[i];
5084
5085 if (!fp->disable_tpa) {
5086 rx_pause.sge_thr_low = 150;
5087 rx_pause.sge_thr_high = 250;
5088 }
5089
5090
5091 offset = BAR_USTRORM_INTMEM +
5092 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5093 fp->cl_id);
5094 for (j = 0;
5095 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5096 j++)
5097 REG_WR(bp, offset + j*4,
5098 ((u32 *)&rx_pause)[j]);
5099 }
5100 }
5101
8a1c38d1
EG
5102 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5103
5104 /* Init rate shaping and fairness contexts */
5105 if (IS_E1HMF(bp)) {
5106 int vn;
5107
5108 /* During init there is no active link;
 5109 until link is up, set the link rate to 10Gbps */
5110 bp->link_vars.line_speed = SPEED_10000;
5111 bnx2x_init_port_minmax(bp);
5112
5113 bnx2x_calc_vn_weight_sum(bp);
5114
5115 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5116 bnx2x_init_vn_minmax(bp, 2*vn + port);
5117
5118 /* Enable rate shaping and fairness */
5119 bp->cmng.flags.cmng_enables =
5120 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5121 if (bp->vn_weight_sum)
5122 bp->cmng.flags.cmng_enables |=
5123 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5124 else
5125 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5126 " fairness will be disabled\n");
5127 } else {
5128 /* rate shaping and fairness are disabled */
5129 DP(NETIF_MSG_IFUP,
5130 "single function mode minmax will be disabled\n");
5131 }
5132
5133
5134 /* Store it to internal memory */
5135 if (bp->port.pmf)
5136 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5137 REG_WR(bp, BAR_XSTRORM_INTMEM +
5138 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5139 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5140}
5141
471de716
EG
5142static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5143{
5144 switch (load_code) {
5145 case FW_MSG_CODE_DRV_LOAD_COMMON:
5146 bnx2x_init_internal_common(bp);
5147 /* no break */
5148
5149 case FW_MSG_CODE_DRV_LOAD_PORT:
5150 bnx2x_init_internal_port(bp);
5151 /* no break */
5152
5153 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5154 bnx2x_init_internal_func(bp);
5155 break;
5156
5157 default:
5158 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5159 break;
5160 }
5161}
5162
5163static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5164{
5165 int i;
5166
5167 for_each_queue(bp, i) {
5168 struct bnx2x_fastpath *fp = &bp->fp[i];
5169
34f80b04 5170 fp->bp = bp;
a2fbb9ea 5171 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5172 fp->index = i;
34f80b04
EG
5173 fp->cl_id = BP_L_ID(bp) + i;
5174 fp->sb_id = fp->cl_id;
5175 DP(NETIF_MSG_IFUP,
f5372251
EG
5176 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5177 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5178 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5179 fp->sb_id);
5c862848 5180 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5181 }
5182
16119785
EG
5183 /* ensure status block indices were read */
5184 rmb();
5185
5186
5c862848
EG
5187 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5188 DEF_SB_ID);
5189 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5190 bnx2x_update_coalesce(bp);
5191 bnx2x_init_rx_rings(bp);
5192 bnx2x_init_tx_ring(bp);
5193 bnx2x_init_sp_ring(bp);
5194 bnx2x_init_context(bp);
471de716 5195 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5196 bnx2x_init_ind_table(bp);
0ef00459
EG
5197 bnx2x_stats_init(bp);
5198
5199 /* At this point, we are ready for interrupts */
5200 atomic_set(&bp->intr_sem, 0);
5201
5202 /* flush all before enabling interrupts */
5203 mb();
5204 mmiowb();
5205
615f8fd9 5206 bnx2x_int_enable(bp);
eb8da205
EG
5207
5208 /* Check for SPIO5 */
5209 bnx2x_attn_int_deasserted0(bp,
5210 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5211 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
5212}
5213
5214/* end of nic init */
5215
5216/*
5217 * gzip service functions
5218 */
5219
5220static int bnx2x_gunzip_init(struct bnx2x *bp)
5221{
5222 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5223 &bp->gunzip_mapping);
5224 if (bp->gunzip_buf == NULL)
5225 goto gunzip_nomem1;
5226
5227 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5228 if (bp->strm == NULL)
5229 goto gunzip_nomem2;
5230
5231 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5232 GFP_KERNEL);
5233 if (bp->strm->workspace == NULL)
5234 goto gunzip_nomem3;
5235
5236 return 0;
5237
5238gunzip_nomem3:
5239 kfree(bp->strm);
5240 bp->strm = NULL;
5241
5242gunzip_nomem2:
5243 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5244 bp->gunzip_mapping);
5245 bp->gunzip_buf = NULL;
5246
5247gunzip_nomem1:
5248 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5249 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
5250 return -ENOMEM;
5251}
5252
5253static void bnx2x_gunzip_end(struct bnx2x *bp)
5254{
5255 kfree(bp->strm->workspace);
5256
5257 kfree(bp->strm);
5258 bp->strm = NULL;
5259
5260 if (bp->gunzip_buf) {
5261 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5262 bp->gunzip_mapping);
5263 bp->gunzip_buf = NULL;
5264 }
5265}
5266
94a78b79 5267static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
5268{
5269 int n, rc;
5270
5271 /* check gzip header */
94a78b79
VZ
5272 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5273 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 5274 return -EINVAL;
94a78b79 5275 }
a2fbb9ea
ET
5276
5277 n = 10;
5278
34f80b04 5279#define FNAME 0x8
a2fbb9ea
ET
5280
5281 if (zbuf[3] & FNAME)
5282 while ((zbuf[n++] != 0) && (n < len));
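/*
 * Header layout behind the check above (per RFC 1952): a gzip stream
 * starts with the magic bytes 0x1f 0x8b, a method byte (8 = deflate) and
 * a flag byte, followed by 6 bytes of mtime/XFL/OS - hence the fixed
 * n = 10.  If FNAME (flag bit 3) is set, a NUL-terminated original file
 * name follows and is skipped before handing the raw deflate data to
 * zlib_inflate().
 */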
5283
94a78b79 5284 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
5285 bp->strm->avail_in = len - n;
5286 bp->strm->next_out = bp->gunzip_buf;
5287 bp->strm->avail_out = FW_BUF_SIZE;
5288
5289 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5290 if (rc != Z_OK)
5291 return rc;
5292
5293 rc = zlib_inflate(bp->strm, Z_FINISH);
5294 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5295 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5296 bp->dev->name, bp->strm->msg);
5297
5298 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5299 if (bp->gunzip_outlen & 0x3)
5300 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5301 " gunzip_outlen (%d) not aligned\n",
5302 bp->dev->name, bp->gunzip_outlen);
5303 bp->gunzip_outlen >>= 2;
5304
5305 zlib_inflateEnd(bp->strm);
5306
5307 if (rc == Z_STREAM_END)
5308 return 0;
5309
5310 return rc;
5311}
5312
5313/* nic load/unload */
5314
5315/*
34f80b04 5316 * General service functions
a2fbb9ea
ET
5317 */
5318
5319/* send a NIG loopback debug packet */
5320static void bnx2x_lb_pckt(struct bnx2x *bp)
5321{
a2fbb9ea 5322 u32 wb_write[3];
a2fbb9ea
ET
5323
5324 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5325 wb_write[0] = 0x55555555;
5326 wb_write[1] = 0x55555555;
34f80b04 5327 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5328 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5329
5330 /* NON-IP protocol */
a2fbb9ea
ET
5331 wb_write[0] = 0x09000000;
5332 wb_write[1] = 0x55555555;
34f80b04 5333 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5334 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5335}
5336
5337/* some of the internal memories
 5338 * are not directly readable from the driver;
 5339 * to test them we send debug packets
 5340 */
5341static int bnx2x_int_mem_test(struct bnx2x *bp)
5342{
5343 int factor;
5344 int count, i;
5345 u32 val = 0;
5346
ad8d3948 5347 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5348 factor = 120;
ad8d3948
EG
5349 else if (CHIP_REV_IS_EMUL(bp))
5350 factor = 200;
5351 else
a2fbb9ea 5352 factor = 1;
a2fbb9ea
ET
5353
5354 DP(NETIF_MSG_HW, "start part1\n");
5355
5356 /* Disable inputs of parser neighbor blocks */
5357 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5358 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5359 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5360 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5361
5362 /* Write 0 to parser credits for CFC search request */
5363 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5364
5365 /* send Ethernet packet */
5366 bnx2x_lb_pckt(bp);
5367
5368 /* TODO: do we need to reset the NIG statistics here? */
5369 /* Wait until NIG register shows 1 packet of size 0x10 */
5370 count = 1000 * factor;
5371 while (count) {
34f80b04 5372
a2fbb9ea
ET
5373 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5374 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5375 if (val == 0x10)
5376 break;
5377
5378 msleep(10);
5379 count--;
5380 }
5381 if (val != 0x10) {
5382 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5383 return -1;
5384 }
5385
5386 /* Wait until PRS register shows 1 packet */
5387 count = 1000 * factor;
5388 while (count) {
5389 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5390 if (val == 1)
5391 break;
5392
5393 msleep(10);
5394 count--;
5395 }
5396 if (val != 0x1) {
5397 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5398 return -2;
5399 }
5400
5401 /* Reset and init BRB, PRS */
34f80b04 5402 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5403 msleep(50);
34f80b04 5404 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5405 msleep(50);
94a78b79
VZ
5406 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5407 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5408
5409 DP(NETIF_MSG_HW, "part2\n");
5410
5411 /* Disable inputs of parser neighbor blocks */
5412 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5413 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5414 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5415 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5416
5417 /* Write 0 to parser credits for CFC search request */
5418 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5419
5420 /* send 10 Ethernet packets */
5421 for (i = 0; i < 10; i++)
5422 bnx2x_lb_pckt(bp);
5423
5424 /* Wait until NIG register shows 10 + 1
 5425 packets with a total size of 11*0x10 = 0xb0 */
5426 count = 1000 * factor;
5427 while (count) {
34f80b04 5428
a2fbb9ea
ET
5429 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5430 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5431 if (val == 0xb0)
5432 break;
5433
5434 msleep(10);
5435 count--;
5436 }
5437 if (val != 0xb0) {
5438 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5439 return -3;
5440 }
5441
5442 /* Wait until PRS register shows 2 packets */
5443 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5444 if (val != 2)
5445 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5446
5447 /* Write 1 to parser credits for CFC search request */
5448 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5449
5450 /* Wait until PRS register shows 3 packets */
5451 msleep(10 * factor);
5452 /* with one credit granted, the parser should pass one more packet */
5453 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5454 if (val != 3)
5455 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5456
5457 /* clear NIG EOP FIFO */
5458 for (i = 0; i < 11; i++)
5459 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5460 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5461 if (val != 1) {
5462 BNX2X_ERR("clear of NIG failed\n");
5463 return -4;
5464 }
5465
5466 /* Reset and init BRB, PRS, NIG */
5467 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5468 msleep(50);
5469 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5470 msleep(50);
94a78b79
VZ
5471 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5472 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5473#ifndef BCM_ISCSI
5474 /* set NIC mode */
5475 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5476#endif
5477
5478 /* Enable inputs of parser neighbor blocks */
5479 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5480 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5481 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5482 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5483
5484 DP(NETIF_MSG_HW, "done\n");
5485
5486 return 0; /* OK */
5487}
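/*
 * The memory test above repeats one poll-with-timeout shape several
 * times (the driver's reg_poll(), used later in bnx2x_init_common(),
 * follows the same pattern).  A minimal sketch of that shape
 * (hypothetical helper, not part of the driver):
 */
static u32 bnx2x_poll_for(struct bnx2x *bp, u32 reg, u32 expected,
			  int tries, int wait_ms)
{
	u32 val = 0;

	while (tries--) {
		val = REG_RD(bp, reg);
		if (val == expected)	/* register reached the value */
			break;
		msleep(wait_ms);
	}
	return val;	/* caller compares against 'expected' */
}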
5488
5489static void enable_blocks_attention(struct bnx2x *bp)
5490{
5491 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5492 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5493 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5494 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5495 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5496 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5497 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5498 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5499 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5500/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5501/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5502 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5503 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5504 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5505/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5506/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5507 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5508 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5509 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5510 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5511/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5512/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5513 if (CHIP_REV_IS_FPGA(bp))
5514 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5515 else
5516 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5517 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5518 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5519 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5520/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5521/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5522 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5523 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5524/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5525 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3 and 4 masked */
a2fbb9ea
ET
5526}
5527
34f80b04 5528
81f75bbf
EG
5529static void bnx2x_reset_common(struct bnx2x *bp)
5530{
5531 /* reset_common */
5532 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5533 0xd3ffff7f);
5534 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5535}
5536
fd4ef40d
EG
5537
5538static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5539{
5540 u32 val;
5541 u8 port;
5542 u8 is_required = 0;
5543
5544 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5545 SHARED_HW_CFG_FAN_FAILURE_MASK;
5546
5547 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5548 is_required = 1;
5549
5550 /*
5551 * The fan failure mechanism is usually related to the PHY type since
5552 * the power consumption of the board is affected by the PHY. Currently,
5553 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5554 */
5555 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5556 for (port = PORT_0; port < PORT_MAX; port++) {
5557 u32 phy_type =
5558 SHMEM_RD(bp, dev_info.port_hw_config[port].
5559 external_phy_config) &
5560 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5561 is_required |=
5562 ((phy_type ==
5563 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5564 (phy_type ==
5565 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5566 }
5567
5568 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5569
5570 if (is_required == 0)
5571 return;
5572
5573 /* Fan failure is indicated by SPIO 5 */
5574 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5575 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5576
5577 /* set to active low mode */
5578 val = REG_RD(bp, MISC_REG_SPIO_INT);
5579 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5580 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5581 REG_WR(bp, MISC_REG_SPIO_INT, val);
5582
5583 /* enable interrupt to signal the IGU */
5584 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5585 val |= (1 << MISC_REGISTERS_SPIO_5);
5586 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5587}
5588
34f80b04 5589static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5590{
a2fbb9ea 5591 u32 val, i;
a2fbb9ea 5592
34f80b04 5593 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5594
81f75bbf 5595 bnx2x_reset_common(bp);
34f80b04
EG
5596 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5597 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5598
94a78b79 5599 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
5600 if (CHIP_IS_E1H(bp))
5601 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5602
34f80b04
EG
5603 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5604 msleep(30);
5605 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5606
94a78b79 5607 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
5608 if (CHIP_IS_E1(bp)) {
5609 /* enable HW interrupt from PXP on USDM overflow
5610 bit 16 on INT_MASK_0 */
5611 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5612 }
a2fbb9ea 5613
94a78b79 5614 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 5615 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5616
5617#ifdef __BIG_ENDIAN
34f80b04
EG
5618 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5619 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5620 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5621 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5622 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
5623 /* make sure this value is 0 */
5624 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
5625
5626/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5627 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5628 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5629 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5630 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5631#endif
5632
34f80b04 5633 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5634#ifdef BCM_ISCSI
34f80b04
EG
5635 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5636 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5637 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5638#endif
5639
34f80b04
EG
5640 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5641 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5642
34f80b04
EG
5643 /* let the HW do its magic ... */
5644 msleep(100);
5645 /* finish PXP init */
5646 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5647 if (val != 1) {
5648 BNX2X_ERR("PXP2 CFG failed\n");
5649 return -EBUSY;
5650 }
5651 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5652 if (val != 1) {
5653 BNX2X_ERR("PXP2 RD_INIT failed\n");
5654 return -EBUSY;
5655 }
a2fbb9ea 5656
34f80b04
EG
5657 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5658 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5659
94a78b79 5660 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 5661
34f80b04
EG
5662 /* clean the DMAE memory */
5663 bp->dmae_ready = 1;
5664 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5665
94a78b79
VZ
5666 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5667 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5668 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5669 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 5670
34f80b04
EG
5671 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5672 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5673 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5674 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5675
94a78b79 5676 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
34f80b04
EG
5677 /* soft reset pulse */
5678 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5679 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5680
5681#ifdef BCM_ISCSI
94a78b79 5682 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 5683#endif
a2fbb9ea 5684
94a78b79 5685 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
5686 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5687 if (!CHIP_REV_IS_SLOW(bp)) {
5688 /* enable hw interrupt from doorbell Q */
5689 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5690 }
a2fbb9ea 5691
94a78b79
VZ
5692 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5693 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 5694 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
5695 /* set NIC mode */
5696 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
5697 if (CHIP_IS_E1H(bp))
5698 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5699
94a78b79
VZ
5700 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5701 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5702 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5703 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 5704
490c3c9b
EG
5705 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5706 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5707 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5708 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 5709
94a78b79
VZ
5710 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5711 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5712 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5713 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 5714
34f80b04
EG
5715 /* sync semi rtc */
5716 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5717 0x80000000);
5718 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5719 0x80000000);
a2fbb9ea 5720
94a78b79
VZ
5721 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5722 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5723 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 5724
34f80b04
EG
5725 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5726 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5727 REG_WR(bp, i, 0xc0cac01a);
5728 /* TODO: replace with something meaningful */
5729 }
94a78b79 5730 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
34f80b04 5731 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5732
34f80b04
EG
5733 if (sizeof(union cdu_context) != 1024)
5734 /* we currently assume that a context is 1024 bytes */
5735 printk(KERN_ALERT PFX "please adjust the size of"
5736 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5737
94a78b79 5738 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
5739 val = (4 << 24) + (0 << 12) + 1024;
5740 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5741 if (CHIP_IS_E1(bp)) {
5742 /* !!! fix PXP client credit until the Excel update */
5743 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5744 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5745 }
a2fbb9ea 5746
94a78b79 5747 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 5748 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5749 /* enable context validation interrupt from CFC */
5750 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5751
5752 /* set the thresholds to prevent CFC/CDU race */
5753 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5754
94a78b79
VZ
5755 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5756 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 5757
34f80b04 5758 /* PXPCS COMMON comes here */
94a78b79 5759 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
5760 /* Reset PCIE errors for debug */
5761 REG_WR(bp, 0x2814, 0xffffffff);
5762 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5763
34f80b04 5764 /* EMAC0 COMMON comes here */
94a78b79 5765 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
34f80b04 5766 /* EMAC1 COMMON comes here */
94a78b79 5767 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
34f80b04 5768 /* DBU COMMON comes here */
94a78b79 5769 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
34f80b04 5770 /* DBG COMMON comes here */
94a78b79 5771 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 5772
94a78b79 5773 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
5774 if (CHIP_IS_E1H(bp)) {
5775 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5776 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5777 }
5778
5779 if (CHIP_REV_IS_SLOW(bp))
5780 msleep(200);
5781
5782 /* finish CFC init */
5783 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5784 if (val != 1) {
5785 BNX2X_ERR("CFC LL_INIT failed\n");
5786 return -EBUSY;
5787 }
5788 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5789 if (val != 1) {
5790 BNX2X_ERR("CFC AC_INIT failed\n");
5791 return -EBUSY;
5792 }
5793 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5794 if (val != 1) {
5795 BNX2X_ERR("CFC CAM_INIT failed\n");
5796 return -EBUSY;
5797 }
5798 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5799
34f80b04
EG
5800 /* read NIG statistic
 5801 to see if this is our first load since power-up */
5802 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5803 val = *bnx2x_sp(bp, wb_data[0]);
5804
5805 /* do internal memory self test */
5806 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5807 BNX2X_ERR("internal mem self test failed\n");
5808 return -EBUSY;
5809 }
5810
35b19ba5 5811 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
5812 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5813 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5814 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5815 bp->port.need_hw_lock = 1;
5816 break;
5817
34f80b04
EG
5818 default:
5819 break;
5820 }
f1410647 5821
fd4ef40d
EG
5822 bnx2x_setup_fan_failure_detection(bp);
5823
34f80b04
EG
5824 /* clear PXP2 attentions */
5825 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5826
34f80b04 5827 enable_blocks_attention(bp);
a2fbb9ea 5828
6bbca910
YR
5829 if (!BP_NOMCP(bp)) {
5830 bnx2x_acquire_phy_lock(bp);
5831 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5832 bnx2x_release_phy_lock(bp);
5833 } else
5834 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5835
34f80b04
EG
5836 return 0;
5837}
a2fbb9ea 5838
34f80b04
EG
5839static int bnx2x_init_port(struct bnx2x *bp)
5840{
5841 int port = BP_PORT(bp);
94a78b79 5842 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 5843 u32 low, high;
34f80b04 5844 u32 val;
a2fbb9ea 5845
34f80b04
EG
5846 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5847
5848 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5849
5850 /* Port PXP comes here */
94a78b79 5851 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
a2fbb9ea 5852 /* Port PXP2 comes here */
94a78b79 5853 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
a2fbb9ea
ET
5854#ifdef BCM_ISCSI
5855 /* Port0 1
5856 * Port1 385 */
5857 i++;
5858 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5859 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5860 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5861 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5862
5863 /* Port0 2
5864 * Port1 386 */
5865 i++;
5866 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5867 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5868 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5869 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5870
5871 /* Port0 3
5872 * Port1 387 */
5873 i++;
5874 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5875 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5876 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5877 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5878#endif
34f80b04 5879 /* Port CMs come here */
94a78b79 5880 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea
ET
5881
5882 /* Port QM comes here */
a2fbb9ea
ET
5883#ifdef BCM_ISCSI
5884 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5885 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5886
94a78b79 5887 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
a2fbb9ea
ET
5888#endif
5889 /* Port DQ comes here */
94a78b79 5890 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 5891
94a78b79 5892 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
5893 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5894 /* no pause for emulation and FPGA */
5895 low = 0;
5896 high = 513;
5897 } else {
5898 if (IS_E1HMF(bp))
5899 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5900 else if (bp->dev->mtu > 4096) {
5901 if (bp->flags & ONE_PORT_FLAG)
5902 low = 160;
5903 else {
5904 val = bp->dev->mtu;
5905 /* (24*1024 + val*4)/256 */
5906 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5907 }
5908 } else
5909 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5910 high = low + 56; /* 14*1024/256 */
5911 }
5912 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5913 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
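/*
 * The thresholds are in 256-byte BRB blocks.  Worked example (assumed:
 * single-function mode, two-port board, mtu 9000): low = 96 + 9000/64
 * rounded up = 96 + 141 = 237 blocks, i.e. 24KB plus 4*mtu of buffering,
 * and high = 237 + 56 = 293 blocks, 14KB above the low mark.
 */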
5914
5915
ad8d3948 5916 /* Port PRS comes here */
94a78b79 5917 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
a2fbb9ea 5918 /* Port TSDM comes here */
94a78b79 5919 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
a2fbb9ea 5920 /* Port CSDM comes here */
94a78b79 5921 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
a2fbb9ea 5922 /* Port USDM comes here */
94a78b79 5923 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
a2fbb9ea 5924 /* Port XSDM comes here */
94a78b79 5925 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 5926
94a78b79
VZ
5927 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5928 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5929 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5930 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 5931
a2fbb9ea 5932 /* Port UPB comes here */
94a78b79 5933 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
34f80b04 5934 /* Port XPB comes here */
94a78b79 5935 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 5936
94a78b79 5937 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
5938
5939 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5940 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5941
5942 /* update threshold */
34f80b04 5943 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5944 /* update init credit */
34f80b04 5945 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
5946
5947 /* probe changes */
34f80b04 5948 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5949 msleep(5);
34f80b04 5950 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5951
5952#ifdef BCM_ISCSI
5953 /* tell the searcher where the T2 table is */
5954 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5955
5956 wb_write[0] = U64_LO(bp->t2_mapping);
5957 wb_write[1] = U64_HI(bp->t2_mapping);
5958 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5959 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5960 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5961 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5962
5963 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5964 /* Port SRCH comes here */
5965#endif
5966 /* Port CDU comes here */
94a78b79 5967 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
a2fbb9ea 5968 /* Port CFC comes here */
94a78b79 5969 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
5970
5971 if (CHIP_IS_E1(bp)) {
5972 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5973 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5974 }
94a78b79 5975 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 5976
94a78b79 5977 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
5978 /* init aeu_mask_attn_func_0/1:
5979 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5980 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5981 * bits 4-7 are used for "per vn group attention" */
5982 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5983 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5984
a2fbb9ea 5985 /* Port PXPCS comes here */
94a78b79 5986 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
a2fbb9ea 5987 /* Port EMAC0 comes here */
94a78b79 5988 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
a2fbb9ea 5989 /* Port EMAC1 comes here */
94a78b79 5990 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
a2fbb9ea 5991 /* Port DBU comes here */
94a78b79 5992 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
a2fbb9ea 5993 /* Port DBG comes here */
94a78b79 5994 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 5995
94a78b79 5996 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
5997
5998 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5999
6000 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6001 /* 0x2 disable e1hov, 0x1 enable */
6002 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6003 (IS_E1HMF(bp) ? 0x1 : 0x2));
6004
1c06328c
EG
6005 /* support pause requests from USDM, TSDM and BRB */
6006 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6007
6008 {
6009 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6010 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6011 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6012 }
34f80b04
EG
6013 }
6014
a2fbb9ea 6015 /* Port MCP comes here */
94a78b79 6016 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
a2fbb9ea 6017 /* Port DMAE comes here */
94a78b79 6018 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6019
35b19ba5 6020 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6021 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6022 {
6023 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6024
6025 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6026 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6027
6028 /* The GPIO should be swapped if the swap register is
6029 set and active */
6030 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6031 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6032
6033 /* Select function upon port-swap configuration */
6034 if (port == 0) {
6035 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6036 aeu_gpio_mask = (swap_val && swap_override) ?
6037 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6038 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6039 } else {
6040 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6041 aeu_gpio_mask = (swap_val && swap_override) ?
6042 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6043 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6044 }
6045 val = REG_RD(bp, offset);
6046 /* add GPIO3 to group */
6047 val |= aeu_gpio_mask;
6048 REG_WR(bp, offset, val);
6049 }
6050 break;
6051
35b19ba5 6052 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
f1410647
ET
6053 /* add SPIO 5 to group 0 */
6054 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6055 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6056 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6057 break;
6058
6059 default:
6060 break;
6061 }
6062
c18487ee 6063 bnx2x__link_reset(bp);
a2fbb9ea 6064
34f80b04
EG
6065 return 0;
6066}
6067
6068#define ILT_PER_FUNC (768/2)
6069#define FUNC_ILT_BASE(func) ((func) * ILT_PER_FUNC)
6070/* the physical address is shifted right 12 bits and a valid bit (1)
 6071 is added at the 53rd bit;
 6072 then, since this is a wide register(TM),
 6073 we split it into two 32-bit writes
 6074 */
6075#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6076#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6077#define PXP_ONE_ILT(x) (((x) << 10) | (x))
 6078#define PXP_ILT_RANGE(f, l) (((l) << 10) | (f))
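/*
 * Worked example for the ONCHIP_ADDR macros (address assumed for
 * illustration): for x = 0x0000123456789000, ONCHIP_ADDR1(x) =
 * (x >> 12) & 0xFFFFFFFF = 0x23456789 and ONCHIP_ADDR2(x) =
 * (1 << 20) | (x >> 44) = 0x00100001 - the remaining high bits plus the
 * valid bit, which lands in bit 52 of the combined 64-bit ILT entry
 * (the "53rd bit" counted from 1).
 */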
6079
6080#define CNIC_ILT_LINES 0
6081
6082static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6083{
6084 int reg;
6085
6086 if (CHIP_IS_E1H(bp))
6087 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6088 else /* E1 */
6089 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6090
6091 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6092}
6093
6094static int bnx2x_init_func(struct bnx2x *bp)
6095{
6096 int port = BP_PORT(bp);
6097 int func = BP_FUNC(bp);
8badd27a 6098 u32 addr, val;
34f80b04
EG
6099 int i;
6100
6101 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6102
8badd27a
EG
6103 /* set MSI reconfigure capability */
6104 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6105 val = REG_RD(bp, addr);
6106 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6107 REG_WR(bp, addr, val);
6108
34f80b04
EG
6109 i = FUNC_ILT_BASE(func);
6110
6111 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6112 if (CHIP_IS_E1H(bp)) {
6113 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6114 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6115 } else /* E1 */
6116 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6117 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6118
6119
6120 if (CHIP_IS_E1H(bp)) {
6121 for (i = 0; i < 9; i++)
6122 bnx2x_init_block(bp,
94a78b79 6123 cm_blocks[i], FUNC0_STAGE + func);
34f80b04
EG
6124
6125 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6126 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6127 }
6128
6129 /* HC init per function */
6130 if (CHIP_IS_E1H(bp)) {
6131 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6132
6133 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6134 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6135 }
94a78b79 6136 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 6137
c14423fe 6138 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6139 REG_WR(bp, 0x2114, 0xffffffff);
6140 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6141
34f80b04
EG
6142 return 0;
6143}
6144
6145static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6146{
6147 int i, rc = 0;
a2fbb9ea 6148
34f80b04
EG
6149 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6150 BP_FUNC(bp), load_code);
a2fbb9ea 6151
34f80b04
EG
6152 bp->dmae_ready = 0;
6153 mutex_init(&bp->dmae_mutex);
6154 bnx2x_gunzip_init(bp);
a2fbb9ea 6155
34f80b04
EG
6156 switch (load_code) {
6157 case FW_MSG_CODE_DRV_LOAD_COMMON:
6158 rc = bnx2x_init_common(bp);
6159 if (rc)
6160 goto init_hw_err;
6161 /* no break */
6162
6163 case FW_MSG_CODE_DRV_LOAD_PORT:
6164 bp->dmae_ready = 1;
6165 rc = bnx2x_init_port(bp);
6166 if (rc)
6167 goto init_hw_err;
6168 /* no break */
6169
6170 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6171 bp->dmae_ready = 1;
6172 rc = bnx2x_init_func(bp);
6173 if (rc)
6174 goto init_hw_err;
6175 break;
6176
6177 default:
6178 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6179 break;
6180 }
6181
6182 if (!BP_NOMCP(bp)) {
6183 int func = BP_FUNC(bp);
a2fbb9ea
ET
6184
6185 bp->fw_drv_pulse_wr_seq =
34f80b04 6186 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6187 DRV_PULSE_SEQ_MASK);
34f80b04
EG
6188 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6189 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6190 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6191 } else
6192 bp->func_stx = 0;
a2fbb9ea 6193
34f80b04
EG
6194 /* this needs to be done before gunzip end */
6195 bnx2x_zero_def_sb(bp);
6196 for_each_queue(bp, i)
6197 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6198
6199init_hw_err:
6200 bnx2x_gunzip_end(bp);
6201
6202 return rc;
a2fbb9ea
ET
6203}
6204
c14423fe 6205/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
6206static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6207{
34f80b04 6208 int func = BP_FUNC(bp);
f1410647
ET
6209 u32 seq = ++bp->fw_seq;
6210 u32 rc = 0;
19680c48
EG
6211 u32 cnt = 1;
6212 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6213
34f80b04 6214 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6215 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6216
19680c48
EG
6217 do {
6218 /* let the FW do its magic ... */
6219 msleep(delay);
a2fbb9ea 6220
19680c48 6221 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6222
19680c48
EG
6223 /* Give the FW up to 2 seconds (200*10ms) */
6224 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6225
6226 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6227 cnt*delay, rc, seq);
a2fbb9ea
ET
6228
6229 /* is this a reply to our command? */
6230 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6231 rc &= FW_MSG_CODE_MASK;
f1410647 6232
a2fbb9ea
ET
6233 } else {
6234 /* FW BUG! */
6235 BNX2X_ERR("FW failed to respond!\n");
6236 bnx2x_fw_dump(bp);
6237 rc = 0;
6238 }
f1410647 6239
a2fbb9ea
ET
6240 return rc;
6241}
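/*
 * Usage pattern, as at the load/unload call sites elsewhere in the
 * driver: the caller passes e.g. DRV_MSG_CODE_LOAD_REQ and branches on
 * the FW_MSG_CODE_* value returned; a return of 0 means the MCP never
 * echoed our sequence number and is treated as a bootcode failure.
 */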
6242
6243static void bnx2x_free_mem(struct bnx2x *bp)
6244{
6245
6246#define BNX2X_PCI_FREE(x, y, size) \
6247 do { \
6248 if (x) { \
6249 pci_free_consistent(bp->pdev, size, x, y); \
6250 x = NULL; \
6251 y = 0; \
6252 } \
6253 } while (0)
6254
6255#define BNX2X_FREE(x) \
6256 do { \
6257 if (x) { \
6258 vfree(x); \
6259 x = NULL; \
6260 } \
6261 } while (0)
6262
6263 int i;
6264
6265 /* fastpath */
555f6c78 6266 /* Common */
a2fbb9ea
ET
6267 for_each_queue(bp, i) {
6268
555f6c78 6269 /* status blocks */
a2fbb9ea
ET
6270 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6271 bnx2x_fp(bp, i, status_blk_mapping),
6272 sizeof(struct host_status_block) +
6273 sizeof(struct eth_tx_db_data));
555f6c78
EG
6274 }
6275 /* Rx */
6276 for_each_rx_queue(bp, i) {
a2fbb9ea 6277
555f6c78 6278 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6279 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6280 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6281 bnx2x_fp(bp, i, rx_desc_mapping),
6282 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6283
6284 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6285 bnx2x_fp(bp, i, rx_comp_mapping),
6286 sizeof(struct eth_fast_path_rx_cqe) *
6287 NUM_RCQ_BD);
a2fbb9ea 6288
7a9b2557 6289 /* SGE ring */
32626230 6290 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6291 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6292 bnx2x_fp(bp, i, rx_sge_mapping),
6293 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6294 }
555f6c78
EG
6295 /* Tx */
6296 for_each_tx_queue(bp, i) {
6297
6298 /* fastpath tx rings: tx_buf tx_desc */
6299 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6300 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6301 bnx2x_fp(bp, i, tx_desc_mapping),
6302 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6303 }
a2fbb9ea
ET
6304 /* end of fastpath */
6305
6306 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6307 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6308
6309 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6310 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
6311
6312#ifdef BCM_ISCSI
6313 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6314 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6315 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6316 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6317#endif
7a9b2557 6318 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6319
6320#undef BNX2X_PCI_FREE
6321#undef BNX2X_FREE
6322}
6323
6324static int bnx2x_alloc_mem(struct bnx2x *bp)
6325{
6326
6327#define BNX2X_PCI_ALLOC(x, y, size) \
6328 do { \
6329 x = pci_alloc_consistent(bp->pdev, size, y); \
6330 if (x == NULL) \
6331 goto alloc_mem_err; \
6332 memset(x, 0, size); \
6333 } while (0)
6334
6335#define BNX2X_ALLOC(x, size) \
6336 do { \
6337 x = vmalloc(size); \
6338 if (x == NULL) \
6339 goto alloc_mem_err; \
6340 memset(x, 0, size); \
6341 } while (0)
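/*
 * Both macros use the do { ... } while (0) wrapper so each expands to a
 * single statement and stays safe under an un-braced if.  Illustration
 * with a hypothetical unwrapped variant:
 *
 *	#define BAD_ALLOC(x, size)  x = vmalloc(size); if (!x) goto err
 *
 *	if (need_ring)
 *		BAD_ALLOC(ring, size);
 *
 * only the vmalloc() lands inside the branch; the NULL check (and its
 * goto) would run unconditionally.
 */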
6342
6343 int i;
6344
6345 /* fastpath */
555f6c78 6346 /* Common */
a2fbb9ea
ET
6347 for_each_queue(bp, i) {
6348 bnx2x_fp(bp, i, bp) = bp;
6349
555f6c78 6350 /* status blocks */
a2fbb9ea
ET
6351 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6352 &bnx2x_fp(bp, i, status_blk_mapping),
6353 sizeof(struct host_status_block) +
6354 sizeof(struct eth_tx_db_data));
555f6c78
EG
6355 }
6356 /* Rx */
6357 for_each_rx_queue(bp, i) {
a2fbb9ea 6358
555f6c78 6359 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6360 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6361 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6362 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6363 &bnx2x_fp(bp, i, rx_desc_mapping),
6364 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6365
6366 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6367 &bnx2x_fp(bp, i, rx_comp_mapping),
6368 sizeof(struct eth_fast_path_rx_cqe) *
6369 NUM_RCQ_BD);
6370
7a9b2557
VZ
6371 /* SGE ring */
6372 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6373 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6374 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6375 &bnx2x_fp(bp, i, rx_sge_mapping),
6376 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6377 }
555f6c78
EG
6378 /* Tx */
6379 for_each_tx_queue(bp, i) {
6380
6381 bnx2x_fp(bp, i, hw_tx_prods) =
6382 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6383
6384 bnx2x_fp(bp, i, tx_prods_mapping) =
6385 bnx2x_fp(bp, i, status_blk_mapping) +
6386 sizeof(struct host_status_block);
6387
6388 /* fastpath tx rings: tx_buf tx_desc */
6389 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6390 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6391 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6392 &bnx2x_fp(bp, i, tx_desc_mapping),
6393 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6394 }
a2fbb9ea
ET
6395 /* end of fastpath */
6396
6397 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6398 sizeof(struct host_def_status_block));
6399
6400 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6401 sizeof(struct bnx2x_slowpath));
6402
6403#ifdef BCM_ISCSI
6404 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6405
6406 /* Initialize T1 */
6407 for (i = 0; i < 64*1024; i += 64) {
6408 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6409 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6410 }
6411
6412 /* allocate searcher T2 table
6413 we allocate 1/4 of alloc num for T2
6414 (which is not entered into the ILT) */
6415 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6416
6417 /* Initialize T2 */
6418 for (i = 0; i < 16*1024; i += 64)
6419 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6420
c14423fe 6421 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
6422 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6423
6424 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6425 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6426
6427 /* QM queues (128*MAX_CONN) */
6428 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6429#endif
6430
6431 /* Slow path ring */
6432 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6433
6434 return 0;
6435
6436alloc_mem_err:
6437 bnx2x_free_mem(bp);
6438 return -ENOMEM;
6439
6440#undef BNX2X_PCI_ALLOC
6441#undef BNX2X_ALLOC
6442}
6443
6444static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6445{
6446 int i;
6447
555f6c78 6448 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6449 struct bnx2x_fastpath *fp = &bp->fp[i];
6450
6451 u16 bd_cons = fp->tx_bd_cons;
6452 u16 sw_prod = fp->tx_pkt_prod;
6453 u16 sw_cons = fp->tx_pkt_cons;
6454
a2fbb9ea
ET
6455 while (sw_cons != sw_prod) {
6456 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6457 sw_cons++;
6458 }
6459 }
6460}
6461
6462static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6463{
6464 int i, j;
6465
555f6c78 6466 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6467 struct bnx2x_fastpath *fp = &bp->fp[j];
6468
a2fbb9ea
ET
6469 for (i = 0; i < NUM_RX_BD; i++) {
6470 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6471 struct sk_buff *skb = rx_buf->skb;
6472
6473 if (skb == NULL)
6474 continue;
6475
6476 pci_unmap_single(bp->pdev,
6477 pci_unmap_addr(rx_buf, mapping),
356e2385 6478 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
6479
6480 rx_buf->skb = NULL;
6481 dev_kfree_skb(skb);
6482 }
7a9b2557 6483 if (!fp->disable_tpa)
32626230
EG
6484 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6485 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6486 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6487 }
6488}
6489
6490static void bnx2x_free_skbs(struct bnx2x *bp)
6491{
6492 bnx2x_free_tx_skbs(bp);
6493 bnx2x_free_rx_skbs(bp);
6494}
6495
6496static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6497{
34f80b04 6498 int i, offset = 1;
a2fbb9ea
ET
6499
6500 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6501 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6502 bp->msix_table[0].vector);
6503
6504 for_each_queue(bp, i) {
c14423fe 6505 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6506 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6507 bnx2x_fp(bp, i, state));
6508
34f80b04 6509 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6510 }
a2fbb9ea
ET
6511}
6512
6513static void bnx2x_free_irq(struct bnx2x *bp)
6514{
a2fbb9ea 6515 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6516 bnx2x_free_msix_irqs(bp);
6517 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6518 bp->flags &= ~USING_MSIX_FLAG;
6519
8badd27a
EG
6520 } else if (bp->flags & USING_MSI_FLAG) {
6521 free_irq(bp->pdev->irq, bp->dev);
6522 pci_disable_msi(bp->pdev);
6523 bp->flags &= ~USING_MSI_FLAG;
6524
a2fbb9ea
ET
6525 } else
6526 free_irq(bp->pdev->irq, bp->dev);
6527}
6528
6529static int bnx2x_enable_msix(struct bnx2x *bp)
6530{
8badd27a
EG
6531 int i, rc, offset = 1;
6532 int igu_vec = 0;
a2fbb9ea 6533
8badd27a
EG
6534 bp->msix_table[0].entry = igu_vec;
6535 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6536
34f80b04 6537 for_each_queue(bp, i) {
8badd27a 6538 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
6539 bp->msix_table[i + offset].entry = igu_vec;
6540 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6541 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6542 }
6543
34f80b04 6544 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6545 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6546 if (rc) {
8badd27a
EG
6547 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6548 return rc;
34f80b04 6549 }
8badd27a 6550
a2fbb9ea
ET
6551 bp->flags |= USING_MSIX_FLAG;
6552
6553 return 0;
a2fbb9ea
ET
6554}
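/*
 * Resulting vector layout: msix_table[0] serves the slowpath/default
 * status block, and msix_table[1 + i] serves fastpath queue i with IGU
 * vector BP_L_ID() + 1 + i, so each queue's interrupt is tied to the
 * status block it owns.
 */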
6555
a2fbb9ea
ET
6556static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6557{
34f80b04 6558 int i, rc, offset = 1;
a2fbb9ea 6559
a2fbb9ea
ET
6560 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6561 bp->dev->name, bp->dev);
a2fbb9ea
ET
6562 if (rc) {
6563 BNX2X_ERR("request sp irq failed\n");
6564 return -EBUSY;
6565 }
6566
6567 for_each_queue(bp, i) {
555f6c78
EG
6568 struct bnx2x_fastpath *fp = &bp->fp[i];
6569
6570 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6571 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6572 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6573 if (rc) {
555f6c78 6574 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
6575 bnx2x_free_msix_irqs(bp);
6576 return -EBUSY;
6577 }
6578
555f6c78 6579 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6580 }
6581
555f6c78
EG
6582 i = BNX2X_NUM_QUEUES(bp);
6583 if (is_multi(bp))
6584 printk(KERN_INFO PFX
6585 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6586 bp->dev->name, bp->msix_table[0].vector,
6587 bp->msix_table[offset].vector,
6588 bp->msix_table[offset + i - 1].vector);
6589 else
6590 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6591 bp->dev->name, bp->msix_table[0].vector,
6592 bp->msix_table[offset + i - 1].vector);
6593
a2fbb9ea 6594 return 0;
a2fbb9ea
ET
6595}
6596
8badd27a
EG
6597static int bnx2x_enable_msi(struct bnx2x *bp)
6598{
6599 int rc;
6600
6601 rc = pci_enable_msi(bp->pdev);
6602 if (rc) {
6603 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6604 return -1;
6605 }
6606 bp->flags |= USING_MSI_FLAG;
6607
6608 return 0;
6609}
6610
a2fbb9ea
ET
6611static int bnx2x_req_irq(struct bnx2x *bp)
6612{
8badd27a 6613 unsigned long flags;
34f80b04 6614 int rc;
a2fbb9ea 6615
8badd27a
EG
6616 if (bp->flags & USING_MSI_FLAG)
6617 flags = 0;
6618 else
6619 flags = IRQF_SHARED;
6620
6621 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6622 bp->dev->name, bp->dev);
a2fbb9ea
ET
6623 if (!rc)
6624 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6625
6626 return rc;
a2fbb9ea
ET
6627}
6628
65abd74d
YG
6629static void bnx2x_napi_enable(struct bnx2x *bp)
6630{
6631 int i;
6632
555f6c78 6633 for_each_rx_queue(bp, i)
65abd74d
YG
6634 napi_enable(&bnx2x_fp(bp, i, napi));
6635}
6636
6637static void bnx2x_napi_disable(struct bnx2x *bp)
6638{
6639 int i;
6640
555f6c78 6641 for_each_rx_queue(bp, i)
65abd74d
YG
6642 napi_disable(&bnx2x_fp(bp, i, napi));
6643}
6644
6645static void bnx2x_netif_start(struct bnx2x *bp)
6646{
e1510706
EG
6647 int intr_sem;
6648
6649 intr_sem = atomic_dec_and_test(&bp->intr_sem);
6650 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6651
6652 if (intr_sem) {
65abd74d 6653 if (netif_running(bp->dev)) {
65abd74d
YG
6654 bnx2x_napi_enable(bp);
6655 bnx2x_int_enable(bp);
555f6c78
EG
6656 if (bp->state == BNX2X_STATE_OPEN)
6657 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
6658 }
6659 }
6660}
6661
f8ef6e44 6662static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6663{
f8ef6e44 6664 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6665 bnx2x_napi_disable(bp);
762d5f6c
EG
6666 netif_tx_disable(bp->dev);
6667 bp->dev->trans_start = jiffies; /* prevent tx timeout */
65abd74d
YG
6668}
6669
a2fbb9ea
ET
6670/*
6671 * Init service functions
6672 */
6673
static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

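/* E1H variant: a single per-function CAM entry carrying the client id
 * and the E1HOV (outer VLAN) tag.
 */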
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

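/* Busy-wait (with 1 ms sleeps, up to ~5 s) for *state_p, which is
 * updated from bnx2x_sp_event(), to reach the requested state; in poll
 * mode the Rx rings are serviced manually while waiting.
 */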
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

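/* Open the leading connection with a PORT_SETUP ramrod and wait until
 * the driver state becomes OPEN.
 */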
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

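/* Pick the interrupt mode and queue counts: forced INTx/MSI implies a
 * single queue; otherwise MSI-X is tried with one queue per online CPU
 * and the driver falls back to a single queue on failure.
 */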
static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   "  number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X  set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}

static void bnx2x_set_rx_mode(struct net_device *dev);

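/* Full bring-up: allocate resources, request IRQs, negotiate the load
 * type with the MCP, init the HW and start the fast path.
 */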
/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only re-enabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);


	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

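/* Close one non-default connection: HALT ramrod followed by a CFC
 * delete, waiting for each completion in polling mode.
 */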
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

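/* Reset helpers of increasing scope (function, port); bnx2x_reset_chip()
 * applies whichever combination the MCP unload response dictates.
 */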
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

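/* Reverse of bnx2x_nic_load(): drain Tx, clear CAM entries, close all
 * connections, reset the chip and report UNLOAD_DONE to the MCP.
 */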
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

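/* The PXP2 "pretend" register lets a PCI function temporarily issue GRC
 * accesses as another function; map a function index to its register.
 */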
static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

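/* If a pre-boot UNDI driver left the chip initialized, run its unload
 * handshake with the MCP and hard-reset the device so the driver can
 * start from a clean state.
 */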
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

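/* Read chip id, flash size, shmem base and bootcode version; sets
 * NO_MCP_FLAG / NO_WOL_FLAG when those capabilities are absent.
 */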
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

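/* Build the port's ethtool "supported" mask from the external PHY type
 * in NVRAM, then trim it by the NVRAM speed capability mask.
 */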
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

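/* Translate the NVRAM link_config into requested speed, duplex, flow
 * control and advertised mask, validating against the supported mask.
 */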
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

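/* Collect per-port link parameters (lane config, external PHY, speed
 * capabilities, XGXS equalization, WoL default) and the MAC address
 * from shared memory.
 */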
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

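/* Per-function hardware info: E1H multi-function (E1HOV) detection,
 * port info and firmware sequence; in MF mode the MAC address comes
 * from the function configuration.
 */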
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

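/* One-time driver state init: locks, work items, module-parameter
 * handling (multi queue, TPA, MRRS), ring sizes, coalescing, timer.
 */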
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

8382static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8383{
8384 struct bnx2x *bp = netdev_priv(dev);
8385 u32 advertising;
8386
34f80b04
EG
8387 if (IS_E1HMF(bp))
8388 return 0;
8389
a2fbb9ea
ET
8390 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8391 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8392 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8393 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8394 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8395 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8396 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8397
a2fbb9ea 8398 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8399 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8400 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8401 return -EINVAL;
f1410647 8402 }
a2fbb9ea
ET
8403
8404 /* advertise the requested speed and duplex if supported */
34f80b04 8405 cmd->advertising &= bp->port.supported;
a2fbb9ea 8406
c18487ee
YR
8407 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8408 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8409 bp->port.advertising |= (ADVERTISED_Autoneg |
8410 cmd->advertising);
a2fbb9ea
ET
8411
8412 } else { /* forced speed */
8413 /* advertise the requested speed and duplex if supported */
8414 switch (cmd->speed) {
8415 case SPEED_10:
8416 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8417 if (!(bp->port.supported &
f1410647
ET
8418 SUPPORTED_10baseT_Full)) {
8419 DP(NETIF_MSG_LINK,
8420 "10M full not supported\n");
a2fbb9ea 8421 return -EINVAL;
f1410647 8422 }
a2fbb9ea
ET
8423
8424 advertising = (ADVERTISED_10baseT_Full |
8425 ADVERTISED_TP);
8426 } else {
34f80b04 8427 if (!(bp->port.supported &
f1410647
ET
8428 SUPPORTED_10baseT_Half)) {
8429 DP(NETIF_MSG_LINK,
8430 "10M half not supported\n");
a2fbb9ea 8431 return -EINVAL;
f1410647 8432 }
a2fbb9ea
ET
8433
8434 advertising = (ADVERTISED_10baseT_Half |
8435 ADVERTISED_TP);
8436 }
8437 break;
8438
8439 case SPEED_100:
8440 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8441 if (!(bp->port.supported &
f1410647
ET
8442 SUPPORTED_100baseT_Full)) {
8443 DP(NETIF_MSG_LINK,
8444 "100M full not supported\n");
a2fbb9ea 8445 return -EINVAL;
f1410647 8446 }
a2fbb9ea
ET
8447
8448 advertising = (ADVERTISED_100baseT_Full |
8449 ADVERTISED_TP);
8450 } else {
34f80b04 8451 if (!(bp->port.supported &
f1410647
ET
8452 SUPPORTED_100baseT_Half)) {
8453 DP(NETIF_MSG_LINK,
8454 "100M half not supported\n");
a2fbb9ea 8455 return -EINVAL;
f1410647 8456 }
a2fbb9ea
ET
8457
8458 advertising = (ADVERTISED_100baseT_Half |
8459 ADVERTISED_TP);
8460 }
8461 break;
8462
8463 case SPEED_1000:
f1410647
ET
8464 if (cmd->duplex != DUPLEX_FULL) {
8465 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8466 return -EINVAL;
f1410647 8467 }
a2fbb9ea 8468
34f80b04 8469 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8470 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8471 return -EINVAL;
f1410647 8472 }
a2fbb9ea
ET
8473
8474 advertising = (ADVERTISED_1000baseT_Full |
8475 ADVERTISED_TP);
8476 break;
8477
8478 case SPEED_2500:
f1410647
ET
8479 if (cmd->duplex != DUPLEX_FULL) {
8480 DP(NETIF_MSG_LINK,
8481 "2.5G half not supported\n");
a2fbb9ea 8482 return -EINVAL;
f1410647 8483 }
a2fbb9ea 8484
34f80b04 8485 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8486 DP(NETIF_MSG_LINK,
8487 "2.5G full not supported\n");
a2fbb9ea 8488 return -EINVAL;
f1410647 8489 }
a2fbb9ea 8490
f1410647 8491 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8492 ADVERTISED_TP);
8493 break;
8494
8495 case SPEED_10000:
f1410647
ET
8496 if (cmd->duplex != DUPLEX_FULL) {
8497 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8498 return -EINVAL;
f1410647 8499 }
a2fbb9ea 8500
34f80b04 8501 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8502 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8503 return -EINVAL;
f1410647 8504 }
a2fbb9ea
ET
8505
8506 advertising = (ADVERTISED_10000baseT_Full |
8507 ADVERTISED_FIBRE);
8508 break;
8509
8510 default:
f1410647 8511 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8512 return -EINVAL;
8513 }
8514
c18487ee
YR
8515 bp->link_params.req_line_speed = cmd->speed;
8516 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8517 bp->port.advertising = advertising;
a2fbb9ea
ET
8518 }
8519
c18487ee 8520 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8521 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8522 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8523 bp->port.advertising);
a2fbb9ea 8524
34f80b04 8525 if (netif_running(dev)) {
bb2a0f7a 8526 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8527 bnx2x_link_set(bp);
8528 }
a2fbb9ea
ET
8529
8530 return 0;
8531}
8532
c18487ee
YR
8533#define PHY_FW_VER_LEN 10
8534
a2fbb9ea
ET
8535static void bnx2x_get_drvinfo(struct net_device *dev,
8536 struct ethtool_drvinfo *info)
8537{
8538 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8539 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8540
8541 strcpy(info->driver, DRV_MODULE_NAME);
8542 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8543
8544 phy_fw_ver[0] = '\0';
34f80b04 8545 if (bp->port.pmf) {
4a37fb66 8546 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8547 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8548 (bp->state != BNX2X_STATE_CLOSED),
8549 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8550 bnx2x_release_phy_lock(bp);
34f80b04 8551 }
c18487ee 8552
f0e53a84
EG
8553 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8554 (bp->common.bc_ver & 0xff0000) >> 16,
8555 (bp->common.bc_ver & 0xff00) >> 8,
8556 (bp->common.bc_ver & 0xff),
8557 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8558 strcpy(info->bus_info, pci_name(bp->pdev));
8559 info->n_stats = BNX2X_NUM_STATS;
8560 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8561 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8562 info->regdump_len = 0;
8563}
8564
0a64ea57
EG
8565#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8566#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8567
8568static int bnx2x_get_regs_len(struct net_device *dev)
8569{
8570 static u32 regdump_len;
8571 struct bnx2x *bp = netdev_priv(dev);
8572 int i;
8573
8574 if (regdump_len)
8575 return regdump_len;
8576
8577 if (CHIP_IS_E1(bp)) {
8578 for (i = 0; i < REGS_COUNT; i++)
8579 if (IS_E1_ONLINE(reg_addrs[i].info))
8580 regdump_len += reg_addrs[i].size;
8581
8582 for (i = 0; i < WREGS_COUNT_E1; i++)
8583 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8584 regdump_len += wreg_addrs_e1[i].size *
8585 (1 + wreg_addrs_e1[i].read_regs_count);
8586
8587 } else { /* E1H */
8588 for (i = 0; i < REGS_COUNT; i++)
8589 if (IS_E1H_ONLINE(reg_addrs[i].info))
8590 regdump_len += reg_addrs[i].size;
8591
8592 for (i = 0; i < WREGS_COUNT_E1H; i++)
8593 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8594 regdump_len += wreg_addrs_e1h[i].size *
8595 (1 + wreg_addrs_e1h[i].read_regs_count);
8596 }
8597 regdump_len *= 4;
8598 regdump_len += sizeof(struct dump_hdr);
8599
8600 return regdump_len;
8601}
8602
8603static void bnx2x_get_regs(struct net_device *dev,
8604 struct ethtool_regs *regs, void *_p)
8605{
8606 u32 *p = _p, i, j;
8607 struct bnx2x *bp = netdev_priv(dev);
8608 struct dump_hdr dump_hdr = {0};
8609
8610 regs->version = 0;
8611 memset(p, 0, regs->len);
8612
8613 if (!netif_running(bp->dev))
8614 return;
8615
8616 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8617 dump_hdr.dump_sign = dump_sign_all;
8618 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8619 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8620 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8621 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8622 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8623
8624 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8625 p += dump_hdr.hdr_size + 1;
8626
8627 if (CHIP_IS_E1(bp)) {
8628 for (i = 0; i < REGS_COUNT; i++)
8629 if (IS_E1_ONLINE(reg_addrs[i].info))
8630 for (j = 0; j < reg_addrs[i].size; j++)
8631 *p++ = REG_RD(bp,
8632 reg_addrs[i].addr + j*4);
8633
8634 } else { /* E1H */
8635 for (i = 0; i < REGS_COUNT; i++)
8636 if (IS_E1H_ONLINE(reg_addrs[i].info))
8637 for (j = 0; j < reg_addrs[i].size; j++)
8638 *p++ = REG_RD(bp,
8639 reg_addrs[i].addr + j*4);
8640 }
8641}
8642
a2fbb9ea
ET
8643static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8644{
8645 struct bnx2x *bp = netdev_priv(dev);
8646
8647 if (bp->flags & NO_WOL_FLAG) {
8648 wol->supported = 0;
8649 wol->wolopts = 0;
8650 } else {
8651 wol->supported = WAKE_MAGIC;
8652 if (bp->wol)
8653 wol->wolopts = WAKE_MAGIC;
8654 else
8655 wol->wolopts = 0;
8656 }
8657 memset(&wol->sopass, 0, sizeof(wol->sopass));
8658}
8659
8660static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8661{
8662 struct bnx2x *bp = netdev_priv(dev);
8663
8664 if (wol->wolopts & ~WAKE_MAGIC)
8665 return -EINVAL;
8666
8667 if (wol->wolopts & WAKE_MAGIC) {
8668 if (bp->flags & NO_WOL_FLAG)
8669 return -EINVAL;
8670
8671 bp->wol = 1;
34f80b04 8672 } else
a2fbb9ea 8673 bp->wol = 0;
34f80b04 8674
a2fbb9ea
ET
8675 return 0;
8676}
8677
8678static u32 bnx2x_get_msglevel(struct net_device *dev)
8679{
8680 struct bnx2x *bp = netdev_priv(dev);
8681
8682 return bp->msglevel;
8683}
8684
8685static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8686{
8687 struct bnx2x *bp = netdev_priv(dev);
8688
8689 if (capable(CAP_NET_ADMIN))
8690 bp->msglevel = level;
8691}
8692
8693static int bnx2x_nway_reset(struct net_device *dev)
8694{
8695 struct bnx2x *bp = netdev_priv(dev);
8696
34f80b04
EG
8697 if (!bp->port.pmf)
8698 return 0;
a2fbb9ea 8699
34f80b04 8700 if (netif_running(dev)) {
bb2a0f7a 8701 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8702 bnx2x_link_set(bp);
8703 }
a2fbb9ea
ET
8704
8705 return 0;
8706}
8707
01e53298
NO
8708static u32
8709bnx2x_get_link(struct net_device *dev)
8710{
8711 struct bnx2x *bp = netdev_priv(dev);
8712
8713 return bp->link_vars.link_up;
8714}
8715
a2fbb9ea
ET
8716static int bnx2x_get_eeprom_len(struct net_device *dev)
8717{
8718 struct bnx2x *bp = netdev_priv(dev);
8719
34f80b04 8720 return bp->common.flash_size;
a2fbb9ea
ET
8721}
8722
8723static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8724{
34f80b04 8725 int port = BP_PORT(bp);
a2fbb9ea
ET
8726 int count, i;
8727 u32 val = 0;
8728
8729 /* adjust timeout for emulation/FPGA */
8730 count = NVRAM_TIMEOUT_COUNT;
8731 if (CHIP_REV_IS_SLOW(bp))
8732 count *= 100;
8733
8734 /* request access to nvram interface */
8735 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8736 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8737
8738 for (i = 0; i < count*10; i++) {
8739 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8740 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8741 break;
8742
8743 udelay(5);
8744 }
8745
8746 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8747 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
8748 return -EBUSY;
8749 }
8750
8751 return 0;
8752}
8753
8754static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8755{
34f80b04 8756 int port = BP_PORT(bp);
a2fbb9ea
ET
8757 int count, i;
8758 u32 val = 0;
8759
8760 /* adjust timeout for emulation/FPGA */
8761 count = NVRAM_TIMEOUT_COUNT;
8762 if (CHIP_REV_IS_SLOW(bp))
8763 count *= 100;
8764
8765 /* relinquish nvram interface */
8766 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8767 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8768
8769 for (i = 0; i < count*10; i++) {
8770 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8771 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8772 break;
8773
8774 udelay(5);
8775 }
8776
8777 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8778 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
8779 return -EBUSY;
8780 }
8781
8782 return 0;
8783}
8784
8785static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8786{
8787 u32 val;
8788
8789 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8790
8791 /* enable both bits, even on read */
8792 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8793 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8794 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8795}
8796
8797static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8798{
8799 u32 val;
8800
8801 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8802
8803 /* disable both bits, even after read */
8804 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8805 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8806 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8807}
8808
4781bfad 8809static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
8810 u32 cmd_flags)
8811{
f1410647 8812 int count, i, rc;
a2fbb9ea
ET
8813 u32 val;
8814
8815 /* build the command word */
8816 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8817
8818 /* need to clear DONE bit separately */
8819 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8820
8821 /* address of the NVRAM to read from */
8822 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8823 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8824
8825 /* issue a read command */
8826 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8827
8828 /* adjust timeout for emulation/FPGA */
8829 count = NVRAM_TIMEOUT_COUNT;
8830 if (CHIP_REV_IS_SLOW(bp))
8831 count *= 100;
8832
8833 /* wait for completion */
8834 *ret_val = 0;
8835 rc = -EBUSY;
8836 for (i = 0; i < count; i++) {
8837 udelay(5);
8838 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8839
8840 if (val & MCPR_NVM_COMMAND_DONE) {
8841 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
8842 /* we read nvram data in cpu order
8843 * but ethtool sees it as an array of bytes
8844 * converting to big-endian will do the work */
4781bfad 8845 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
8846 rc = 0;
8847 break;
8848 }
8849 }
8850
8851 return rc;
8852}
8853
8854static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8855 int buf_size)
8856{
8857 int rc;
8858 u32 cmd_flags;
4781bfad 8859 __be32 val;
a2fbb9ea
ET
8860
8861 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8862 DP(BNX2X_MSG_NVM,
c14423fe 8863 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8864 offset, buf_size);
8865 return -EINVAL;
8866 }
8867
34f80b04
EG
8868 if (offset + buf_size > bp->common.flash_size) {
8869 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8870 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8871 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8872 return -EINVAL;
8873 }
8874
8875 /* request access to nvram interface */
8876 rc = bnx2x_acquire_nvram_lock(bp);
8877 if (rc)
8878 return rc;
8879
8880 /* enable access to nvram interface */
8881 bnx2x_enable_nvram_access(bp);
8882
8883 /* read the first word(s) */
8884 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8885 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8886 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8887 memcpy(ret_buf, &val, 4);
8888
8889 /* advance to the next dword */
8890 offset += sizeof(u32);
8891 ret_buf += sizeof(u32);
8892 buf_size -= sizeof(u32);
8893 cmd_flags = 0;
8894 }
8895
8896 if (rc == 0) {
8897 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8898 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8899 memcpy(ret_buf, &val, 4);
8900 }
8901
8902 /* disable access to nvram interface */
8903 bnx2x_disable_nvram_access(bp);
8904 bnx2x_release_nvram_lock(bp);
8905
8906 return rc;
8907}
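/* A minimal usage sketch (for illustration only; it mirrors what
 * bnx2x_test_nvram() does further below): read the 4-byte NVRAM magic
 * at offset 0 and compare it against the expected signature after
 * byte-swapping:
 *
 *	__be32 magic;
 *	if (bnx2x_nvram_read(bp, 0, (u8 *)&magic, sizeof(magic)) == 0 &&
 *	    be32_to_cpu(magic) == 0x669955aa)
 *		;	// NVRAM content looks sane
 */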

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))
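/* BYTE_OFFSET() gives the bit shift of a byte within its aligned dword,
 * e.g. BYTE_OFFSET(5) == 8 * (5 & 0x03) == 8, the shift for byte 1 of
 * the second dword; it is used below to patch a single byte in place. */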

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes;
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;
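		/* NVRAM is written one page at a time: the dword that
		 * starts a page (or the whole transfer) carries FIRST and
		 * the dword that ends a page (or the transfer) carries
		 * LAST, so every page crossing closes one command
		 * sequence and opens the next */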

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars, 1);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA when Rx CSUM is disabled; otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
		dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->vlan_features |= NETIF_F_TSO6;
#endif
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
#ifdef BCM_VLAN
		dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->vlan_features &= ~NETIF_F_TSO6;
#endif
	}
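	/* Keeping vlan_features in step with dev->features matters here:
	 * VLAN devices stacked on this interface derive their offload
	 * capabilities from vlan_features, so toggling TSO only in
	 * dev->features would leave VLAN traffic out of sync */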

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};
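	/* offset1 is the per-port stride: the register exercised for
	 * port N lives at offset0 + N*offset1, and mask marks the bits
	 * that are actually implemented read/write */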

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   first by writing 0x00000000, then by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that the value read back is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};
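	/* e1_mask/e1h_mask list the parity-status bits that are ignored
	 * on the respective chip revision; only bits outside the mask
	 * are treated as real parity errors in the check below */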

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);
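	/* the payload is a deterministic 0x00..0xff ramp keyed to the
	 * byte index, so the receive side below can verify the looped
	 * packet byte-for-byte without keeping a copy */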

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3
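/* Standard CRC-32 property: running the CRC over a block that includes
 * its own stored CRC yields this constant residue when the block is
 * intact, so each nvram_tbl region below is validated by comparing
 * ether_crc_le() against CRC32_RESIDUAL instead of recomputing and
 * matching the stored checksum. */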

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
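/* Visibility rules implied by these macros: per-function stats are
 * always reported, while port-wide MAC stats are hidden in E1H
 * multi-function mode (where the port is shared between functions)
 * unless BNX2X_MSG_STATS is set in msglevel for debugging. */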
66e855f3 9950
a2fbb9ea
ET
9951static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9952{
bb2a0f7a 9953 struct bnx2x *bp = netdev_priv(dev);
de832a55 9954 int i, j, k;
bb2a0f7a 9955
a2fbb9ea
ET
9956 switch (stringset) {
9957 case ETH_SS_STATS:
de832a55
EG
9958 if (is_multi(bp)) {
9959 k = 0;
9960 for_each_queue(bp, i) {
9961 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9962 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9963 bnx2x_q_stats_arr[j].string, i);
9964 k += BNX2X_NUM_Q_STATS;
9965 }
9966 if (IS_E1HMF_MODE_STAT(bp))
9967 break;
9968 for (j = 0; j < BNX2X_NUM_STATS; j++)
9969 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9970 bnx2x_stats_arr[j].string);
9971 } else {
9972 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9973 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9974 continue;
9975 strcpy(buf + j*ETH_GSTRING_LEN,
9976 bnx2x_stats_arr[i].string);
9977 j++;
9978 }
bb2a0f7a 9979 }
a2fbb9ea
ET
9980 break;
9981
9982 case ETH_SS_TEST:
9983 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9984 break;
9985 }
9986}
9987
9988static int bnx2x_get_stats_count(struct net_device *dev)
9989{
bb2a0f7a 9990 struct bnx2x *bp = netdev_priv(dev);
de832a55 9991 int i, num_stats;
bb2a0f7a 9992
de832a55
EG
9993 if (is_multi(bp)) {
9994 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9995 if (!IS_E1HMF_MODE_STAT(bp))
9996 num_stats += BNX2X_NUM_STATS;
9997 } else {
9998 if (IS_E1HMF_MODE_STAT(bp)) {
9999 num_stats = 0;
10000 for (i = 0; i < BNX2X_NUM_STATS; i++)
10001 if (IS_FUNC_STAT(i))
10002 num_stats++;
10003 } else
10004 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 10005 }
de832a55 10006
bb2a0f7a 10007 return num_stats;
a2fbb9ea
ET
10008}
10009
10010static void bnx2x_get_ethtool_stats(struct net_device *dev,
10011 struct ethtool_stats *stats, u64 *buf)
10012{
10013 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
10014 u32 *hw_stats, *offset;
10015 int i, j, k;
bb2a0f7a 10016
de832a55
EG
10017 if (is_multi(bp)) {
10018 k = 0;
10019 for_each_queue(bp, i) {
10020 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10021 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10022 if (bnx2x_q_stats_arr[j].size == 0) {
10023 /* skip this counter */
10024 buf[k + j] = 0;
10025 continue;
10026 }
10027 offset = (hw_stats +
10028 bnx2x_q_stats_arr[j].offset);
10029 if (bnx2x_q_stats_arr[j].size == 4) {
10030 /* 4-byte counter */
10031 buf[k + j] = (u64) *offset;
10032 continue;
10033 }
10034 /* 8-byte counter */
10035 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10036 }
10037 k += BNX2X_NUM_Q_STATS;
10038 }
10039 if (IS_E1HMF_MODE_STAT(bp))
10040 return;
10041 hw_stats = (u32 *)&bp->eth_stats;
10042 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10043 if (bnx2x_stats_arr[j].size == 0) {
10044 /* skip this counter */
10045 buf[k + j] = 0;
10046 continue;
10047 }
10048 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10049 if (bnx2x_stats_arr[j].size == 4) {
10050 /* 4-byte counter */
10051 buf[k + j] = (u64) *offset;
10052 continue;
10053 }
10054 /* 8-byte counter */
10055 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10056 }
de832a55
EG
10057 } else {
10058 hw_stats = (u32 *)&bp->eth_stats;
10059 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10060 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10061 continue;
10062 if (bnx2x_stats_arr[i].size == 0) {
10063 /* skip this counter */
10064 buf[j] = 0;
10065 j++;
10066 continue;
10067 }
10068 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10069 if (bnx2x_stats_arr[i].size == 4) {
10070 /* 4-byte counter */
10071 buf[j] = (u64) *offset;
10072 j++;
10073 continue;
10074 }
10075 /* 8-byte counter */
10076 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10077 j++;
a2fbb9ea 10078 }
a2fbb9ea
ET
10079 }
10080}
10081
10082static int bnx2x_phys_id(struct net_device *dev, u32 data)
10083{
10084 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10085 int port = BP_PORT(bp);
a2fbb9ea
ET
10086 int i;
10087
34f80b04
EG
10088 if (!netif_running(dev))
10089 return 0;
10090
10091 if (!bp->port.pmf)
10092 return 0;
10093
a2fbb9ea
ET
10094 if (data == 0)
10095 data = 2;
10096
10097 for (i = 0; i < (data * 2); i++) {
c18487ee 10098 if ((i % 2) == 0)
34f80b04 10099 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
10100 bp->link_params.hw_led_mode,
10101 bp->link_params.chip_id);
10102 else
34f80b04 10103 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
10104 bp->link_params.hw_led_mode,
10105 bp->link_params.chip_id);
10106
a2fbb9ea
ET
10107 msleep_interruptible(500);
10108 if (signal_pending(current))
10109 break;
10110 }
10111
c18487ee 10112 if (bp->link_vars.link_up)
34f80b04 10113 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
10114 bp->link_vars.line_speed,
10115 bp->link_params.hw_led_mode,
10116 bp->link_params.chip_id);
a2fbb9ea
ET
10117
10118 return 0;
10119}
10120
10121static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
10122 .get_settings = bnx2x_get_settings,
10123 .set_settings = bnx2x_set_settings,
10124 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
10125 .get_regs_len = bnx2x_get_regs_len,
10126 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
10127 .get_wol = bnx2x_get_wol,
10128 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
10129 .get_msglevel = bnx2x_get_msglevel,
10130 .set_msglevel = bnx2x_set_msglevel,
10131 .nway_reset = bnx2x_nway_reset,
01e53298 10132 .get_link = bnx2x_get_link,
7a9b2557
VZ
10133 .get_eeprom_len = bnx2x_get_eeprom_len,
10134 .get_eeprom = bnx2x_get_eeprom,
10135 .set_eeprom = bnx2x_set_eeprom,
10136 .get_coalesce = bnx2x_get_coalesce,
10137 .set_coalesce = bnx2x_set_coalesce,
10138 .get_ringparam = bnx2x_get_ringparam,
10139 .set_ringparam = bnx2x_set_ringparam,
10140 .get_pauseparam = bnx2x_get_pauseparam,
10141 .set_pauseparam = bnx2x_set_pauseparam,
10142 .get_rx_csum = bnx2x_get_rx_csum,
10143 .set_rx_csum = bnx2x_set_rx_csum,
10144 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10145 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
10146 .set_flags = bnx2x_set_flags,
10147 .get_flags = ethtool_op_get_flags,
10148 .get_sg = ethtool_op_get_sg,
10149 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
10150 .get_tso = ethtool_op_get_tso,
10151 .set_tso = bnx2x_set_tso,
10152 .self_test_count = bnx2x_self_test_count,
10153 .self_test = bnx2x_self_test,
10154 .get_strings = bnx2x_get_strings,
10155 .phys_id = bnx2x_phys_id,
10156 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 10157 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10158};
10159
10160/* end of ethtool_ops */
10161
10162/****************************************************************************
10163* General service functions
10164****************************************************************************/
10165
10166static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10167{
10168 u16 pmcsr;
10169
10170 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10171
10172 switch (state) {
10173 case PCI_D0:
34f80b04 10174 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10175 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10176 PCI_PM_CTRL_PME_STATUS));
10177
10178 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10179 /* delay required during transition out of D3hot */
a2fbb9ea 10180 msleep(20);
34f80b04 10181 break;
a2fbb9ea 10182
10183 case PCI_D3hot:
10184 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10185 pmcsr |= 3;
a2fbb9ea 10186
10187 if (bp->wol)
10188 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10189
10190 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10191 pmcsr);
a2fbb9ea 10192
10193 /* No more memory access after this point until
10194 * device is brought back to D0.
10195 */
10196 break;
10197
10198 default:
10199 return -EINVAL;
10200 }
10201 return 0;
10202}
10203
10204static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10205{
10206 u16 rx_cons_sb;
10207
10208 /* Tell compiler that status block fields can change */
10209 barrier();
10210 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10211 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10212 rx_cons_sb++;
10213 return (fp->rx_comp_cons != rx_cons_sb);
10214}
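/* Editor's sketch (illustrative only): why bnx2x_has_rx_work() bumps the
 * consumer index when it lands on MAX_RCQ_DESC_CNT. The last entry of each
 * RCQ page is a "next page" descriptor rather than a completion, so an
 * index sitting on that slot is stepped past it before the comparison.
 * bnx2x_demo_next_rcq() is a hypothetical helper, not driver code.
 */
static u16 bnx2x_demo_next_rcq(u16 idx)
{
	/* skip the next-page descriptor at the end of every page */
	if ((idx & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		idx++;
	return idx;
}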
10215
10216/*
10217 * net_device service functions
10218 */
10219
10220static int bnx2x_poll(struct napi_struct *napi, int budget)
10221{
10222 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10223 napi);
10224 struct bnx2x *bp = fp->bp;
10225 int work_done = 0;
10226
10227#ifdef BNX2X_STOP_ON_ERROR
10228 if (unlikely(bp->panic))
34f80b04 10229 goto poll_panic;
10230#endif
10231
10232 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10233 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10234 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10235
10236 bnx2x_update_fpsb_idx(fp);
10237
237907c1 10238 if (bnx2x_has_tx_work(fp))
7961f791 10239 bnx2x_tx_int(fp);
a2fbb9ea 10240
8534f32c 10241 if (bnx2x_has_rx_work(fp)) {
a2fbb9ea 10242 work_done = bnx2x_rx_int(fp, budget);
356e2385 10243
10244 /* must not complete if we consumed full budget */
10245 if (work_done >= budget)
10246 goto poll_again;
10247 }
a2fbb9ea 10248
10249 /* BNX2X_HAS_WORK() reads the status block, thus we need to
10250 * ensure that the status block indices have actually been read
10251 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
10252 * so that we won't write the "newer" value of the status block to IGU
10253 * (if there was a DMA right after BNX2X_HAS_WORK and
10254 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10255 * may be postponed to right before bnx2x_ack_sb). In this case
10256 * there will never be another interrupt until there is another update
10257 * of the status block, while there is still unhandled work.
10258 */
10259 rmb();
a2fbb9ea 10260
8534f32c 10261 if (!BNX2X_HAS_WORK(fp)) {
a2fbb9ea 10262#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10263poll_panic:
a2fbb9ea 10264#endif
288379f0 10265 napi_complete(napi);
a2fbb9ea 10266
0626b899 10267 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 10268 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 10269 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10270 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10271 }
356e2385 10272
8534f32c 10273poll_again:
10274 return work_done;
10275}
10276
10277
10278/* We split the first BD into a headers BD and a data BD
33471629 10279 * to ease the pain of our fellow microcode engineers;
10280 * we use one mapping for both BDs.
10281 * So far this has only been observed to happen
10282 * in Other Operating Systems(TM).
10283 */
10284static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10285 struct bnx2x_fastpath *fp,
10286 struct eth_tx_bd **tx_bd, u16 hlen,
10287 u16 bd_prod, int nbd)
10288{
10289 struct eth_tx_bd *h_tx_bd = *tx_bd;
10290 struct eth_tx_bd *d_tx_bd;
10291 dma_addr_t mapping;
10292 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10293
10294 /* first, fix the first BD */
10295 h_tx_bd->nbd = cpu_to_le16(nbd);
10296 h_tx_bd->nbytes = cpu_to_le16(hlen);
10297
10298 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10299 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10300 h_tx_bd->addr_lo, h_tx_bd->nbd);
10301
10302 /* now get a new data BD
10303 * (after the PBD) and fill it */
10304 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10305 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10306
10307 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10308 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10309
10310 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10311 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10312 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10313 d_tx_bd->vlan = 0;
10314 /* this marks the BD as one that has no individual mapping;
10315 * the FW ignores this flag in a BD not marked as start
10316 */
10317 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10318 DP(NETIF_MSG_TX_QUEUED,
10319 "TSO split data size is %d (%x:%x)\n",
10320 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10321
10322 /* update tx_bd for marking the last BD flag */
10323 *tx_bd = d_tx_bd;
10324
10325 return bd_prod;
10326}
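/* Editor's sketch (illustrative only): the address arithmetic behind
 * bnx2x_tx_split() above. Both BDs share a single DMA mapping; the data BD
 * simply starts hlen bytes further into it, so nothing is remapped.
 * Hypothetical helper, not driver code.
 */
static void bnx2x_demo_split_mapping(dma_addr_t mapping, u16 hlen,
				     dma_addr_t *hdr_addr,
				     dma_addr_t *data_addr)
{
	*hdr_addr = mapping;		/* header BD covers bytes [0, hlen) */
	*data_addr = mapping + hlen;	/* data BD covers the remaining bytes */
}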
10327
10328static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10329{
10330 if (fix > 0)
10331 csum = (u16) ~csum_fold(csum_sub(csum,
10332 csum_partial(t_header - fix, fix, 0)));
10333
10334 else if (fix < 0)
10335 csum = (u16) ~csum_fold(csum_add(csum,
10336 csum_partial(t_header, -fix, 0)));
10337
10338 return swab16(csum);
10339}
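/* Editor's sketch (illustrative only): a hypothetical use of
 * bnx2x_csum_fix(). If the stack started its partial checksum two bytes
 * before the transport header (fix = 2), the helper subtracts the partial
 * sum of those two bytes and byte-swaps the folded result for the parsing
 * BD. The wrapper below is made up for illustration.
 */
static u16 bnx2x_demo_fix_csum(unsigned char *thdr, u16 stack_csum)
{
	return bnx2x_csum_fix(thdr, stack_csum, 2);
}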
10340
10341static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10342{
10343 u32 rc;
10344
10345 if (skb->ip_summed != CHECKSUM_PARTIAL)
10346 rc = XMIT_PLAIN;
10347
10348 else {
4781bfad 10349 if (skb->protocol == htons(ETH_P_IPV6)) {
10350 rc = XMIT_CSUM_V6;
10351 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10352 rc |= XMIT_CSUM_TCP;
10353
10354 } else {
10355 rc = XMIT_CSUM_V4;
10356 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10357 rc |= XMIT_CSUM_TCP;
10358 }
10359 }
10360
10361 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10362 rc |= XMIT_GSO_V4;
10363
10364 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10365 rc |= XMIT_GSO_V6;
10366
10367 return rc;
10368}
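/* Editor's note (illustrative only): how the mask built by bnx2x_xmit_type()
 * is consumed below. XMIT_PLAIN is the zero value, so any checksum or GSO
 * bit set is what makes bnx2x_start_xmit() allocate a parsing BD.
 * Hypothetical helper, not driver code.
 */
static int bnx2x_demo_needs_pbd(u32 xmit_type)
{
	/* non-zero means checksum and/or GSO work for the HW, i.e. a PBD */
	return (xmit_type != XMIT_PLAIN);
}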
10369
632da4d6 10370#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10371/* check if packet requires linearization (packet is too fragmented)
10372 no need to check fragmentation if page size > 8K (there will be no
10373 violation of FW restrictions) */
10374static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10375 u32 xmit_type)
10376{
10377 int to_copy = 0;
10378 int hlen = 0;
10379 int first_bd_sz = 0;
10380
10381 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10382 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10383
10384 if (xmit_type & XMIT_GSO) {
10385 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10386 /* Check if LSO packet needs to be copied:
10387 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10388 int wnd_size = MAX_FETCH_BD - 3;
33471629 10389 /* Number of windows to check */
10390 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10391 int wnd_idx = 0;
10392 int frag_idx = 0;
10393 u32 wnd_sum = 0;
10394
10395 /* Headers length */
10396 hlen = (int)(skb_transport_header(skb) - skb->data) +
10397 tcp_hdrlen(skb);
10398
10399 /* Amount of data (w/o headers) in the linear part of the SKB */
10400 first_bd_sz = skb_headlen(skb) - hlen;
10401
10402 wnd_sum = first_bd_sz;
10403
10404 /* Calculate the first sum - it's special */
10405 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10406 wnd_sum +=
10407 skb_shinfo(skb)->frags[frag_idx].size;
10408
10409 /* If there was data on linear skb data - check it */
10410 if (first_bd_sz > 0) {
10411 if (unlikely(wnd_sum < lso_mss)) {
10412 to_copy = 1;
10413 goto exit_lbl;
10414 }
10415
10416 wnd_sum -= first_bd_sz;
10417 }
10418
10419 /* Others are easier: run through the frag list and
10420 check all windows */
10421 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10422 wnd_sum +=
10423 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10424
10425 if (unlikely(wnd_sum < lso_mss)) {
10426 to_copy = 1;
10427 break;
10428 }
10429 wnd_sum -=
10430 skb_shinfo(skb)->frags[wnd_idx].size;
10431 }
10432 } else {
10433 /* a non-LSO packet that is too fragmented must
10434 always be linearized */
10435 to_copy = 1;
10436 }
10437 }
10438
10439exit_lbl:
10440 if (unlikely(to_copy))
10441 DP(NETIF_MSG_TX_QUEUED,
10442 "Linearization IS REQUIRED for %s packet. "
10443 "num_frags %d hlen %d first_bd_sz %d\n",
10444 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10445 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10446
10447 return to_copy;
10448}
632da4d6 10449#endif
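/* Editor's sketch (illustrative only): the sliding-window test performed by
 * bnx2x_pkt_req_lin() above, reduced to an array of BD sizes. The FW
 * requires every window of wnd_size consecutive BDs to carry at least one
 * full MSS of payload; any window falling short forces linearization.
 * Hypothetical helper, not driver code.
 */
static int bnx2x_demo_window_short(const u32 *bd_sz, int nbd, int wnd_size,
				   u32 lso_mss)
{
	u32 wnd_sum = 0;
	int i;

	for (i = 0; i < nbd; i++) {
		wnd_sum += bd_sz[i];
		if (i >= wnd_size)
			wnd_sum -= bd_sz[i - wnd_size];
		/* every complete window must hold at least one MSS */
		if ((i >= wnd_size - 1) && (wnd_sum < lso_mss))
			return 1;	/* linearization required */
	}
	return 0;
}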
10450
10451/* called with netif_tx_lock
a2fbb9ea 10452 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10453 * netif_wake_queue()
10454 */
10455static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10456{
10457 struct bnx2x *bp = netdev_priv(dev);
10458 struct bnx2x_fastpath *fp;
555f6c78 10459 struct netdev_queue *txq;
10460 struct sw_tx_bd *tx_buf;
10461 struct eth_tx_bd *tx_bd;
10462 struct eth_tx_parse_bd *pbd = NULL;
10463 u16 pkt_prod, bd_prod;
755735eb 10464 int nbd, fp_index;
a2fbb9ea 10465 dma_addr_t mapping;
10466 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10467 int vlan_off = (bp->e1hov ? 4 : 0);
10468 int i;
10469 u8 hlen = 0;
10470
10471#ifdef BNX2X_STOP_ON_ERROR
10472 if (unlikely(bp->panic))
10473 return NETDEV_TX_BUSY;
10474#endif
10475
10476 fp_index = skb_get_queue_mapping(skb);
10477 txq = netdev_get_tx_queue(dev, fp_index);
10478
a2fbb9ea 10479 fp = &bp->fp[fp_index];
755735eb 10480
231fd58a 10481 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
de832a55 10482 fp->eth_q_stats.driver_xoff++;
555f6c78 10483 netif_tx_stop_queue(txq);
10484 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10485 return NETDEV_TX_BUSY;
10486 }
10487
10488 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10489 " gso type %x xmit_type %x\n",
10490 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10491 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10492
632da4d6 10493#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10494 /* First, check if we need to linearize the skb (due to FW
10495 restrictions). No need to check fragmentation if page size > 8K
10496 (there will be no violation of FW restrictions) */
10497 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10498 /* Statistics of linearization */
10499 bp->lin_cnt++;
10500 if (skb_linearize(skb) != 0) {
10501 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10502 "silently dropping this SKB\n");
10503 dev_kfree_skb_any(skb);
da5a662a 10504 return NETDEV_TX_OK;
10505 }
10506 }
632da4d6 10507#endif
755735eb 10508
a2fbb9ea 10509 /*
755735eb 10510 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 10511 then for TSO or xsum we have a parsing info BD,
755735eb 10512 and only then we have the rest of the TSO BDs.
10513 (Don't forget to mark the last one as last,
10514 and to unmap only AFTER you write to the BD ...)
755735eb 10515 And above all, all PBD sizes are in words - NOT DWORDS!
10516 */
10517
10518 pkt_prod = fp->tx_pkt_prod++;
755735eb 10519 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10520
755735eb 10521 /* get a tx_buf and first BD */
10522 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10523 tx_bd = &fp->tx_desc_ring[bd_prod];
10524
10525 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10526 tx_bd->general_data = (UNICAST_ADDRESS <<
10527 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10528 /* header nbd */
10529 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10530
10531 /* remember the first BD of the packet */
10532 tx_buf->first_bd = fp->tx_bd_prod;
10533 tx_buf->skb = skb;
10534
10535 DP(NETIF_MSG_TX_QUEUED,
10536 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10537 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10538
10539#ifdef BCM_VLAN
10540 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10541 (bp->flags & HW_VLAN_TX_FLAG)) {
10542 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10543 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10544 vlan_off += 4;
10545 } else
0c6671b0 10546#endif
755735eb 10547 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10548
755735eb 10549 if (xmit_type) {
755735eb 10550 /* turn on parsing and get a BD */
10551 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10552 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10553
10554 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10555 }
10556
10557 if (xmit_type & XMIT_CSUM) {
10558 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10559
10560 /* for now NS flag is not used in Linux */
10561 pbd->global_data =
10562 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10563 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 10564
10565 pbd->ip_hlen = (skb_transport_header(skb) -
10566 skb_network_header(skb)) / 2;
10567
10568 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 10569
10570 pbd->total_hlen = cpu_to_le16(hlen);
10571 hlen = hlen*2 - vlan_off;
a2fbb9ea 10572
10573 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10574
10575 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 10576 tx_bd->bd_flags.as_bitfield |=
10577 ETH_TX_BD_FLAGS_IP_CSUM;
10578 else
10579 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10580
10581 if (xmit_type & XMIT_CSUM_TCP) {
10582 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10583
10584 } else {
10585 s8 fix = SKB_CS_OFF(skb); /* signed! */
10586
a2fbb9ea 10587 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 10588 pbd->cs_offset = fix / 2;
a2fbb9ea 10589
10590 DP(NETIF_MSG_TX_QUEUED,
10591 "hlen %d offset %d fix %d csum before fix %x\n",
10592 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10593 SKB_CS(skb));
10594
10595 /* HW bug: fixup the CSUM */
10596 pbd->tcp_pseudo_csum =
10597 bnx2x_csum_fix(skb_transport_header(skb),
10598 SKB_CS(skb), fix);
10599
10600 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10601 pbd->tcp_pseudo_csum);
10602 }
10603 }
10604
10605 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 10606 skb_headlen(skb), PCI_DMA_TODEVICE);
10607
10608 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10609 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 10610 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10611 tx_bd->nbd = cpu_to_le16(nbd);
10612 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10613
10614 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10615 " nbytes %d flags %x vlan %x\n",
10616 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10617 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10618 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 10619
755735eb 10620 if (xmit_type & XMIT_GSO) {
10621
10622 DP(NETIF_MSG_TX_QUEUED,
10623 "TSO packet len %d hlen %d total len %d tso size %d\n",
10624 skb->len, hlen, skb_headlen(skb),
10625 skb_shinfo(skb)->gso_size);
10626
10627 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10628
10629 if (unlikely(skb_headlen(skb) > hlen))
10630 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10631 bd_prod, ++nbd);
10632
10633 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10634 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10635 pbd->tcp_flags = pbd_tcp_flags(skb);
10636
10637 if (xmit_type & XMIT_GSO_V4) {
10638 pbd->ip_id = swab16(ip_hdr(skb)->id);
10639 pbd->tcp_pseudo_csum =
10640 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10641 ip_hdr(skb)->daddr,
10642 0, IPPROTO_TCP, 0));
10643
10644 } else
10645 pbd->tcp_pseudo_csum =
10646 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10647 &ipv6_hdr(skb)->daddr,
10648 0, IPPROTO_TCP, 0));
10649
10650 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10651 }
10652
10653 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10654 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 10655
10656 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10657 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 10658
10659 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10660 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 10661
10662 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10663 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10664 tx_bd->nbytes = cpu_to_le16(frag->size);
10665 tx_bd->vlan = cpu_to_le16(pkt_prod);
10666 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 10667
10668 DP(NETIF_MSG_TX_QUEUED,
10669 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10670 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10671 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10672 }
10673
755735eb 10674 /* now at last mark the BD as the last BD */
10675 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10676
10677 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10678 tx_bd, tx_bd->bd_flags.as_bitfield);
10679
10680 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10681
755735eb 10682 /* now send a tx doorbell, counting the next BD
10683 * if the packet contains or ends with it
10684 */
10685 if (TX_BD_POFF(bd_prod) < nbd)
10686 nbd++;
10687
10688 if (pbd)
10689 DP(NETIF_MSG_TX_QUEUED,
10690 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10691 " tcp_flags %x xsum %x seq %u hlen %u\n",
10692 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10693 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 10694 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 10695
755735eb 10696 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 10697
10698 /*
10699 * Make sure that the BD data is updated before updating the producer
10700 * since FW might read the BD right after the producer is updated.
10701 * This is only applicable for weak-ordered memory model archs such
10702 * as IA-64. The following barrier is also mandatory since the FW
10703 * assumes packets always have BDs.
10704 */
10705 wmb();
10706
4781bfad 10707 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
a2fbb9ea 10708 mb(); /* FW restriction: must not reorder writing nbd and packets */
4781bfad 10709 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
0626b899 10710 DOORBELL(bp, fp->index, 0);
10711
10712 mmiowb();
10713
755735eb 10714 fp->tx_bd_prod += nbd;
10715
10716 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10717 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10718 if we put Tx into XOFF state. */
10719 smp_mb();
555f6c78 10720 netif_tx_stop_queue(txq);
de832a55 10721 fp->eth_q_stats.driver_xoff++;
a2fbb9ea 10722 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 10723 netif_tx_wake_queue(txq);
10724 }
10725 fp->tx_pkt++;
10726
10727 return NETDEV_TX_OK;
10728}
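/* Editor's note (illustrative only): the ordering contract at the tail of
 * bnx2x_start_xmit(), restated as a hypothetical helper. The BD writes must
 * be globally visible before the producer updates, and the two producer
 * words must reach the chip in order; this mirrors the code above rather
 * than replacing it.
 */
static void bnx2x_demo_ring_doorbell(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, int nbd)
{
	wmb();		/* BDs visible before producers (weakly ordered archs) */
	le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
	mb();		/* FW rule: do not reorder the two producer updates */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);
	mmiowb();	/* keep the doorbell ordered before the lock release */
}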
10729
bb2a0f7a 10730/* called with rtnl_lock */
10731static int bnx2x_open(struct net_device *dev)
10732{
10733 struct bnx2x *bp = netdev_priv(dev);
10734
10735 netif_carrier_off(dev);
10736
10737 bnx2x_set_power_state(bp, PCI_D0);
10738
bb2a0f7a 10739 return bnx2x_nic_load(bp, LOAD_OPEN);
10740}
10741
bb2a0f7a 10742/* called with rtnl_lock */
10743static int bnx2x_close(struct net_device *dev)
10744{
10745 struct bnx2x *bp = netdev_priv(dev);
10746
10747 /* Unload the driver, release IRQs */
10748 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10749 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10750 if (!CHIP_REV_IS_SLOW(bp))
10751 bnx2x_set_power_state(bp, PCI_D3hot);
10752
10753 return 0;
10754}
10755
f5372251 10756/* called with netif_tx_lock from dev_mcast.c */
10757static void bnx2x_set_rx_mode(struct net_device *dev)
10758{
10759 struct bnx2x *bp = netdev_priv(dev);
10760 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10761 int port = BP_PORT(bp);
10762
10763 if (bp->state != BNX2X_STATE_OPEN) {
10764 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10765 return;
10766 }
10767
10768 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10769
10770 if (dev->flags & IFF_PROMISC)
10771 rx_mode = BNX2X_RX_MODE_PROMISC;
10772
10773 else if ((dev->flags & IFF_ALLMULTI) ||
10774 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10775 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10776
10777 else { /* some multicasts */
10778 if (CHIP_IS_E1(bp)) {
10779 int i, old, offset;
10780 struct dev_mc_list *mclist;
10781 struct mac_configuration_cmd *config =
10782 bnx2x_sp(bp, mcast_config);
10783
10784 for (i = 0, mclist = dev->mc_list;
10785 mclist && (i < dev->mc_count);
10786 i++, mclist = mclist->next) {
10787
10788 config->config_table[i].
10789 cam_entry.msb_mac_addr =
10790 swab16(*(u16 *)&mclist->dmi_addr[0]);
10791 config->config_table[i].
10792 cam_entry.middle_mac_addr =
10793 swab16(*(u16 *)&mclist->dmi_addr[2]);
10794 config->config_table[i].
10795 cam_entry.lsb_mac_addr =
10796 swab16(*(u16 *)&mclist->dmi_addr[4]);
10797 config->config_table[i].cam_entry.flags =
10798 cpu_to_le16(port);
10799 config->config_table[i].
10800 target_table_entry.flags = 0;
10801 config->config_table[i].
10802 target_table_entry.client_id = 0;
10803 config->config_table[i].
10804 target_table_entry.vlan_id = 0;
10805
10806 DP(NETIF_MSG_IFUP,
10807 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10808 config->config_table[i].
10809 cam_entry.msb_mac_addr,
10810 config->config_table[i].
10811 cam_entry.middle_mac_addr,
10812 config->config_table[i].
10813 cam_entry.lsb_mac_addr);
10814 }
8d9c5f34 10815 old = config->hdr.length;
10816 if (old > i) {
10817 for (; i < old; i++) {
10818 if (CAM_IS_INVALID(config->
10819 config_table[i])) {
af246401 10820 /* already invalidated */
10821 break;
10822 }
10823 /* invalidate */
10824 CAM_INVALIDATE(config->
10825 config_table[i]);
10826 }
10827 }
10828
10829 if (CHIP_REV_IS_SLOW(bp))
10830 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10831 else
10832 offset = BNX2X_MAX_MULTICAST*(1 + port);
10833
8d9c5f34 10834 config->hdr.length = i;
34f80b04 10835 config->hdr.offset = offset;
8d9c5f34 10836 config->hdr.client_id = bp->fp->cl_id;
10837 config->hdr.reserved1 = 0;
10838
10839 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10840 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10841 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10842 0);
10843 } else { /* E1H */
10844 /* Accept one or more multicasts */
10845 struct dev_mc_list *mclist;
10846 u32 mc_filter[MC_HASH_SIZE];
10847 u32 crc, bit, regidx;
10848 int i;
10849
10850 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10851
10852 for (i = 0, mclist = dev->mc_list;
10853 mclist && (i < dev->mc_count);
10854 i++, mclist = mclist->next) {
10855
10856 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10857 mclist->dmi_addr);
10858
10859 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10860 bit = (crc >> 24) & 0xff;
10861 regidx = bit >> 5;
10862 bit &= 0x1f;
10863 mc_filter[regidx] |= (1 << bit);
10864 }
10865
10866 for (i = 0; i < MC_HASH_SIZE; i++)
10867 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10868 mc_filter[i]);
10869 }
10870 }
10871
10872 bp->rx_mode = rx_mode;
10873 bnx2x_set_storm_rx_mode(bp);
10874}
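/* Editor's sketch (illustrative only): the E1H multicast hash used in
 * bnx2x_set_rx_mode() above. The top byte of the little-endian CRC32C of
 * the MAC address picks one of 256 filter bits spread across the
 * MC_HASH_SIZE 32-bit registers. Hypothetical helper, not driver code.
 */
static void bnx2x_demo_mc_hash_set(const u8 *mac, u32 *mc_filter)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;	/* filter bit index, 0..255 */
	u32 regidx = bit >> 5;		/* which 32-bit register */

	mc_filter[regidx] |= (1 << (bit & 0x1f));
}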
10875
10876/* called with rtnl_lock */
10877static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10878{
10879 struct sockaddr *addr = p;
10880 struct bnx2x *bp = netdev_priv(dev);
10881
34f80b04 10882 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10883 return -EINVAL;
10884
10885 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10886 if (netif_running(dev)) {
10887 if (CHIP_IS_E1(bp))
3101c2bc 10888 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 10889 else
3101c2bc 10890 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 10891 }
10892
10893 return 0;
10894}
10895
c18487ee 10896/* called with rtnl_lock */
10897static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10898{
10899 struct mii_ioctl_data *data = if_mii(ifr);
10900 struct bnx2x *bp = netdev_priv(dev);
3196a88a 10901 int port = BP_PORT(bp);
10902 int err;
10903
10904 switch (cmd) {
10905 case SIOCGMIIPHY:
34f80b04 10906 data->phy_id = bp->port.phy_addr;
a2fbb9ea 10907
c14423fe 10908 /* fallthrough */
c18487ee 10909
a2fbb9ea 10910 case SIOCGMIIREG: {
c18487ee 10911 u16 mii_regval;
a2fbb9ea 10912
10913 if (!netif_running(dev))
10914 return -EAGAIN;
a2fbb9ea 10915
34f80b04 10916 mutex_lock(&bp->port.phy_mutex);
3196a88a 10917 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10918 DEFAULT_PHY_DEV_ADDR,
10919 (data->reg_num & 0x1f), &mii_regval);
10920 data->val_out = mii_regval;
34f80b04 10921 mutex_unlock(&bp->port.phy_mutex);
10922 return err;
10923 }
10924
10925 case SIOCSMIIREG:
10926 if (!capable(CAP_NET_ADMIN))
10927 return -EPERM;
10928
10929 if (!netif_running(dev))
10930 return -EAGAIN;
10931
34f80b04 10932 mutex_lock(&bp->port.phy_mutex);
3196a88a 10933 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10934 DEFAULT_PHY_DEV_ADDR,
10935 (data->reg_num & 0x1f), data->val_in);
34f80b04 10936 mutex_unlock(&bp->port.phy_mutex);
10937 return err;
10938
10939 default:
10940 /* do nothing */
10941 break;
10942 }
10943
10944 return -EOPNOTSUPP;
10945}
10946
34f80b04 10947/* called with rtnl_lock */
10948static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10949{
10950 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10951 int rc = 0;
10952
10953 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10954 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10955 return -EINVAL;
10956
10957 /* This does not race with packet allocation
c14423fe 10958 * because the actual alloc size is
10959 * only updated as part of load
10960 */
10961 dev->mtu = new_mtu;
10962
10963 if (netif_running(dev)) {
10964 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10965 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10966 }
10967
10968 return rc;
10969}
10970
10971static void bnx2x_tx_timeout(struct net_device *dev)
10972{
10973 struct bnx2x *bp = netdev_priv(dev);
10974
10975#ifdef BNX2X_STOP_ON_ERROR
10976 if (!bp->panic)
10977 bnx2x_panic();
10978#endif
10979 /* This allows the netif to be shut down gracefully before resetting */
10980 schedule_work(&bp->reset_task);
10981}
10982
10983#ifdef BCM_VLAN
34f80b04 10984/* called with rtnl_lock */
10985static void bnx2x_vlan_rx_register(struct net_device *dev,
10986 struct vlan_group *vlgrp)
10987{
10988 struct bnx2x *bp = netdev_priv(dev);
10989
10990 bp->vlgrp = vlgrp;
10991
10992 /* Set flags according to the required capabilities */
10993 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10994
10995 if (dev->features & NETIF_F_HW_VLAN_TX)
10996 bp->flags |= HW_VLAN_TX_FLAG;
10997
10998 if (dev->features & NETIF_F_HW_VLAN_RX)
10999 bp->flags |= HW_VLAN_RX_FLAG;
11000
a2fbb9ea 11001 if (netif_running(dev))
49d66772 11002 bnx2x_set_client_config(bp);
a2fbb9ea 11003}
34f80b04 11004
11005#endif
11006
11007#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11008static void poll_bnx2x(struct net_device *dev)
11009{
11010 struct bnx2x *bp = netdev_priv(dev);
11011
11012 disable_irq(bp->pdev->irq);
11013 bnx2x_interrupt(bp->pdev->irq, dev);
11014 enable_irq(bp->pdev->irq);
11015}
11016#endif
11017
11018static const struct net_device_ops bnx2x_netdev_ops = {
11019 .ndo_open = bnx2x_open,
11020 .ndo_stop = bnx2x_close,
11021 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 11022 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11023 .ndo_set_mac_address = bnx2x_change_mac_addr,
11024 .ndo_validate_addr = eth_validate_addr,
11025 .ndo_do_ioctl = bnx2x_ioctl,
11026 .ndo_change_mtu = bnx2x_change_mtu,
11027 .ndo_tx_timeout = bnx2x_tx_timeout,
11028#ifdef BCM_VLAN
11029 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11030#endif
11031#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11032 .ndo_poll_controller = poll_bnx2x,
11033#endif
11034};
11035
11036static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11037 struct net_device *dev)
11038{
11039 struct bnx2x *bp;
11040 int rc;
11041
11042 SET_NETDEV_DEV(dev, &pdev->dev);
11043 bp = netdev_priv(dev);
11044
11045 bp->dev = dev;
11046 bp->pdev = pdev;
a2fbb9ea 11047 bp->flags = 0;
34f80b04 11048 bp->func = PCI_FUNC(pdev->devfn);
11049
11050 rc = pci_enable_device(pdev);
11051 if (rc) {
11052 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11053 goto err_out;
11054 }
11055
11056 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11057 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11058 " aborting\n");
11059 rc = -ENODEV;
11060 goto err_out_disable;
11061 }
11062
11063 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11064 printk(KERN_ERR PFX "Cannot find second PCI device"
11065 " base address, aborting\n");
11066 rc = -ENODEV;
11067 goto err_out_disable;
11068 }
11069
11070 if (atomic_read(&pdev->enable_cnt) == 1) {
11071 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11072 if (rc) {
11073 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11074 " aborting\n");
11075 goto err_out_disable;
11076 }
a2fbb9ea 11077
11078 pci_set_master(pdev);
11079 pci_save_state(pdev);
11080 }
11081
11082 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11083 if (bp->pm_cap == 0) {
11084 printk(KERN_ERR PFX "Cannot find power management"
11085 " capability, aborting\n");
11086 rc = -EIO;
11087 goto err_out_release;
11088 }
11089
11090 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11091 if (bp->pcie_cap == 0) {
11092 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11093 " aborting\n");
11094 rc = -EIO;
11095 goto err_out_release;
11096 }
11097
6a35528a 11098 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 11099 bp->flags |= USING_DAC_FLAG;
6a35528a 11100 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11101 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11102 " failed, aborting\n");
11103 rc = -EIO;
11104 goto err_out_release;
11105 }
11106
284901a9 11107 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11108 printk(KERN_ERR PFX "System does not support DMA,"
11109 " aborting\n");
11110 rc = -EIO;
11111 goto err_out_release;
11112 }
11113
11114 dev->mem_start = pci_resource_start(pdev, 0);
11115 dev->base_addr = dev->mem_start;
11116 dev->mem_end = pci_resource_end(pdev, 0);
11117
11118 dev->irq = pdev->irq;
11119
275f165f 11120 bp->regview = pci_ioremap_bar(pdev, 0);
11121 if (!bp->regview) {
11122 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11123 rc = -ENOMEM;
11124 goto err_out_release;
11125 }
11126
11127 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11128 min_t(u64, BNX2X_DB_SIZE,
11129 pci_resource_len(pdev, 2)));
11130 if (!bp->doorbells) {
11131 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11132 rc = -ENOMEM;
11133 goto err_out_unmap;
11134 }
11135
11136 bnx2x_set_power_state(bp, PCI_D0);
11137
11138 /* clean indirect addresses */
11139 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11140 PCICFG_VENDOR_ID_OFFSET);
11141 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11142 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11143 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11144 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 11145
34f80b04 11146 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 11147
c64213cd 11148 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 11149 dev->ethtool_ops = &bnx2x_ethtool_ops;
11150 dev->features |= NETIF_F_SG;
11151 dev->features |= NETIF_F_HW_CSUM;
11152 if (bp->flags & USING_DAC_FLAG)
11153 dev->features |= NETIF_F_HIGHDMA;
11154 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11155 dev->features |= NETIF_F_TSO6;
11156#ifdef BCM_VLAN
11157 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 11158 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11159
11160 dev->vlan_features |= NETIF_F_SG;
11161 dev->vlan_features |= NETIF_F_HW_CSUM;
11162 if (bp->flags & USING_DAC_FLAG)
11163 dev->vlan_features |= NETIF_F_HIGHDMA;
11164 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11165 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 11166#endif
11167
11168 return 0;
11169
11170err_out_unmap:
11171 if (bp->regview) {
11172 iounmap(bp->regview);
11173 bp->regview = NULL;
11174 }
11175 if (bp->doorbells) {
11176 iounmap(bp->doorbells);
11177 bp->doorbells = NULL;
11178 }
11179
11180err_out_release:
11181 if (atomic_read(&pdev->enable_cnt) == 1)
11182 pci_release_regions(pdev);
11183
11184err_out_disable:
11185 pci_disable_device(pdev);
11186 pci_set_drvdata(pdev, NULL);
11187
11188err_out:
11189 return rc;
11190}
11191
11192static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11193{
11194 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11195
11196 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11197 return val;
11198}
11199
11200/* return value of 1=2.5GHz 2=5GHz */
11201static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11202{
11203 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11204
11205 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11206 return val;
11207}
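/* Editor's sketch (illustrative only): how the two helpers above are meant
 * to be consumed; bnx2x_init_one() below prints the same information. An
 * x8 Gen1 link, for example, reports width 8 and speed 1 (2.5GHz).
 * Hypothetical helper, not driver code.
 */
static void bnx2x_demo_print_pcie(struct bnx2x *bp)
{
	printk(KERN_INFO PFX "PCI-E x%d %s\n",
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz");
}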
11208static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11209{
11210 struct bnx2x_fw_file_hdr *fw_hdr;
11211 struct bnx2x_fw_file_section *sections;
11212 u16 *ops_offsets;
11213 u32 offset, len, num_ops;
11214 int i;
11215 const struct firmware *firmware = bp->firmware;
11216 const u8 *fw_ver;
11217
11218 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11219 return -EINVAL;
11220
11221 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11222 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11223
11224 /* Make sure none of the offsets and sizes make us read beyond
11225 * the end of the firmware data */
11226 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11227 offset = be32_to_cpu(sections[i].offset);
11228 len = be32_to_cpu(sections[i].len);
11229 if (offset + len > firmware->size) {
11230 printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11231 return -EINVAL;
11232 }
11233 }
11234
11235 /* Likewise for the init_ops offsets */
11236 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11237 ops_offsets = (u16 *)(firmware->data + offset);
11238 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11239
11240 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11241 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11242 printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11243 return -EINVAL;
11244 }
11245 }
11246
11247 /* Check FW version */
11248 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11249 fw_ver = firmware->data + offset;
11250 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11251 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11252 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11253 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11254 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11255 " Should be %d.%d.%d.%d\n",
11256 fw_ver[0], fw_ver[1], fw_ver[2],
11257 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11258 BCM_5710_FW_MINOR_VERSION,
11259 BCM_5710_FW_REVISION_VERSION,
11260 BCM_5710_FW_ENGINEERING_VERSION);
11261 return -EINVAL;
11262 }
11263
11264 return 0;
11265}
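/* Editor's sketch (illustrative only): the per-section bounds test that
 * bnx2x_check_firmware() applies above, as a standalone helper. offset and
 * len are 32-bit in the file header, so the sum is widened to 64 bits here
 * to rule out wrap-around on hostile values. Hypothetical helper.
 */
static int bnx2x_demo_section_in_bounds(u32 offset, u32 len, size_t fw_size)
{
	/* widen before adding so offset + len cannot overflow */
	return ((u64)offset + len <= fw_size);
}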
11266
11267static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11268{
11269 u32 i;
11270 const __be32 *source = (const __be32*)_source;
11271 u32 *target = (u32*)_target;
11272
11273 for (i = 0; i < n/4; i++)
11274 target[i] = be32_to_cpu(source[i]);
11275}
11276
11277/*
11278 Ops array is stored in the following format:
11279 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11280 */
11281static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11282{
11283 u32 i, j, tmp;
11284 const __be32 *source = (const __be32*)_source;
11285 struct raw_op *target = (struct raw_op*)_target;
11286
11287 for (i = 0, j = 0; i < n/8; i++, j+=2) {
11288 tmp = be32_to_cpu(source[j]);
11289 target[i].op = (tmp >> 24) & 0xff;
11290 target[i].offset = tmp & 0xffffff;
11291 target[i].raw_data = be32_to_cpu(source[j+1]);
11292 }
11293}
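/* Editor's note (illustrative only): one 8-byte record decoded the way
 * bnx2x_prep_ops() does it. For the big-endian words 0x01000010 and
 * 0x0000abcd, the record yields op 0x01, offset 0x000010 and raw_data
 * 0x0000abcd. Hypothetical single-record helper.
 */
static void bnx2x_demo_decode_op(const __be32 *rec, struct raw_op *op)
{
	u32 tmp = be32_to_cpu(rec[0]);

	op->op = (tmp >> 24) & 0xff;	/* top byte: opcode */
	op->offset = tmp & 0xffffff;	/* low 24 bits: offset */
	op->raw_data = be32_to_cpu(rec[1]);
}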
11294static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11295{
11296 u32 i;
11297 u16 *target = (u16*)_target;
11298 const __be16 *source = (const __be16*)_source;
11299
11300 for (i = 0; i < n/2; i++)
11301 target[i] = be16_to_cpu(source[i]);
11302}
11303
11304#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11305 do { \
11306 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11307 bp->arr = kmalloc(len, GFP_KERNEL); \
11308 if (!bp->arr) { \
11309 printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11310 goto lbl; \
11311 } \
11312 func(bp->firmware->data + \
11313 be32_to_cpu(fw_hdr->arr.offset), \
11314 (u8*)bp->arr, len); \
11315 } while (0)
11316
11317
11318static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11319{
11320 char fw_file_name[40] = {0};
11321 int rc, offset;
11322 struct bnx2x_fw_file_hdr *fw_hdr;
11323
11324 /* Create a FW file name */
11325 if (CHIP_IS_E1(bp))
11326 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11327 else
11328 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11329
11330 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11331 BCM_5710_FW_MAJOR_VERSION,
11332 BCM_5710_FW_MINOR_VERSION,
11333 BCM_5710_FW_REVISION_VERSION,
11334 BCM_5710_FW_ENGINEERING_VERSION);
11335
11336 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11337
11338 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11339 if (rc) {
11340 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11341 goto request_firmware_exit;
11342 }
11343
11344 rc = bnx2x_check_firmware(bp);
11345 if (rc) {
11346 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11347 goto request_firmware_exit;
11348 }
11349
11350 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11351
11352 /* Initialize the pointers to the init arrays */
11353 /* Blob */
11354 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11355
11356 /* Opcodes */
11357 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11358
11359 /* Offsets */
11360 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11361
11362 /* STORMs firmware */
11363 bp->tsem_int_table_data = bp->firmware->data +
11364 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11365 bp->tsem_pram_data = bp->firmware->data +
11366 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11367 bp->usem_int_table_data = bp->firmware->data +
11368 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11369 bp->usem_pram_data = bp->firmware->data +
11370 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11371 bp->xsem_int_table_data = bp->firmware->data +
11372 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11373 bp->xsem_pram_data = bp->firmware->data +
11374 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11375 bp->csem_int_table_data = bp->firmware->data +
11376 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11377 bp->csem_pram_data = bp->firmware->data +
11378 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11379
11380 return 0;
11381init_offsets_alloc_err:
11382 kfree(bp->init_ops);
11383init_ops_alloc_err:
11384 kfree(bp->init_data);
11385request_firmware_exit:
11386 release_firmware(bp->firmware);
11387
11388 return rc;
11389}
11390
11391
25047950 11392
11393static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11394 const struct pci_device_id *ent)
11395{
11396 static int version_printed;
11397 struct net_device *dev = NULL;
11398 struct bnx2x *bp;
25047950 11399 int rc;
11400
11401 if (version_printed++ == 0)
11402 printk(KERN_INFO "%s", version);
11403
11404 /* dev zeroed in init_etherdev */
555f6c78 11405 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11406 if (!dev) {
11407 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 11408 return -ENOMEM;
34f80b04 11409 }
a2fbb9ea 11410
11411 bp = netdev_priv(dev);
11412 bp->msglevel = debug;
11413
34f80b04 11414 rc = bnx2x_init_dev(pdev, dev);
11415 if (rc < 0) {
11416 free_netdev(dev);
11417 return rc;
11418 }
11419
11420 pci_set_drvdata(pdev, dev);
11421
34f80b04 11422 rc = bnx2x_init_bp(bp);
11423 if (rc)
11424 goto init_one_exit;
11425
11426 /* Set init arrays */
11427 rc = bnx2x_init_firmware(bp, &pdev->dev);
11428 if (rc) {
11429 printk(KERN_ERR PFX "Error loading firmware\n");
11430 goto init_one_exit;
11431 }
11432
693fc0d1 11433 rc = register_netdev(dev);
34f80b04 11434 if (rc) {
693fc0d1 11435 dev_err(&pdev->dev, "Cannot register net device\n");
11436 goto init_one_exit;
11437 }
11438
25047950 11439 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
87942b46 11440 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
34f80b04 11441 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11442 bnx2x_get_pcie_width(bp),
11443 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11444 dev->base_addr, bp->pdev->irq);
e174961c 11445 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
c016201c 11446
a2fbb9ea 11447 return 0;
11448
11449init_one_exit:
11450 if (bp->regview)
11451 iounmap(bp->regview);
11452
11453 if (bp->doorbells)
11454 iounmap(bp->doorbells);
11455
11456 free_netdev(dev);
11457
11458 if (atomic_read(&pdev->enable_cnt) == 1)
11459 pci_release_regions(pdev);
11460
11461 pci_disable_device(pdev);
11462 pci_set_drvdata(pdev, NULL);
11463
11464 return rc;
11465}
11466
11467static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11468{
11469 struct net_device *dev = pci_get_drvdata(pdev);
11470 struct bnx2x *bp;
11471
11472 if (!dev) {
11473 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11474 return;
11475 }
228241eb 11476 bp = netdev_priv(dev);
a2fbb9ea 11477
11478 unregister_netdev(dev);
11479
11480 kfree(bp->init_ops_offsets);
11481 kfree(bp->init_ops);
11482 kfree(bp->init_data);
11483 release_firmware(bp->firmware);
11484
11485 if (bp->regview)
11486 iounmap(bp->regview);
11487
11488 if (bp->doorbells)
11489 iounmap(bp->doorbells);
11490
11491 free_netdev(dev);
11492
11493 if (atomic_read(&pdev->enable_cnt) == 1)
11494 pci_release_regions(pdev);
11495
11496 pci_disable_device(pdev);
11497 pci_set_drvdata(pdev, NULL);
11498}
11499
11500static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11501{
11502 struct net_device *dev = pci_get_drvdata(pdev);
11503 struct bnx2x *bp;
11504
11505 if (!dev) {
11506 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11507 return -ENODEV;
11508 }
11509 bp = netdev_priv(dev);
a2fbb9ea 11510
34f80b04 11511 rtnl_lock();
a2fbb9ea 11512
34f80b04 11513 pci_save_state(pdev);
228241eb 11514
11515 if (!netif_running(dev)) {
11516 rtnl_unlock();
11517 return 0;
11518 }
11519
11520 netif_device_detach(dev);
a2fbb9ea 11521
da5a662a 11522 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 11523
a2fbb9ea 11524 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 11525
11526 rtnl_unlock();
11527
11528 return 0;
11529}
11530
11531static int bnx2x_resume(struct pci_dev *pdev)
11532{
11533 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 11534 struct bnx2x *bp;
11535 int rc;
11536
11537 if (!dev) {
11538 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11539 return -ENODEV;
11540 }
228241eb 11541 bp = netdev_priv(dev);
a2fbb9ea 11542
11543 rtnl_lock();
11544
228241eb 11545 pci_restore_state(pdev);
11546
11547 if (!netif_running(dev)) {
11548 rtnl_unlock();
11549 return 0;
11550 }
11551
11552 bnx2x_set_power_state(bp, PCI_D0);
11553 netif_device_attach(dev);
11554
da5a662a 11555 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 11556
11557 rtnl_unlock();
11558
11559 return rc;
11560}
11561
11562static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11563{
11564 int i;
11565
11566 bp->state = BNX2X_STATE_ERROR;
11567
11568 bp->rx_mode = BNX2X_RX_MODE_NONE;
11569
11570 bnx2x_netif_stop(bp, 0);
11571
11572 del_timer_sync(&bp->timer);
11573 bp->stats_state = STATS_STATE_DISABLED;
11574 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11575
11576 /* Release IRQs */
11577 bnx2x_free_irq(bp);
11578
11579 if (CHIP_IS_E1(bp)) {
11580 struct mac_configuration_cmd *config =
11581 bnx2x_sp(bp, mcast_config);
11582
8d9c5f34 11583 for (i = 0; i < config->hdr.length; i++)
11584 CAM_INVALIDATE(config->config_table[i]);
11585 }
11586
11587 /* Free SKBs, SGEs, TPA pool and driver internals */
11588 bnx2x_free_skbs(bp);
555f6c78 11589 for_each_rx_queue(bp, i)
f8ef6e44 11590 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 11591 for_each_rx_queue(bp, i)
7cde1c8b 11592 netif_napi_del(&bnx2x_fp(bp, i, napi));
11593 bnx2x_free_mem(bp);
11594
11595 bp->state = BNX2X_STATE_CLOSED;
11596
11597 netif_carrier_off(bp->dev);
11598
11599 return 0;
11600}
11601
11602static void bnx2x_eeh_recover(struct bnx2x *bp)
11603{
11604 u32 val;
11605
11606 mutex_init(&bp->port.phy_mutex);
11607
11608 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11609 bp->link_params.shmem_base = bp->common.shmem_base;
11610 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11611
11612 if (!bp->common.shmem_base ||
11613 (bp->common.shmem_base < 0xA0000) ||
11614 (bp->common.shmem_base >= 0xC0000)) {
11615 BNX2X_DEV_INFO("MCP not active\n");
11616 bp->flags |= NO_MCP_FLAG;
11617 return;
11618 }
11619
11620 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11621 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11622 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11623 BNX2X_ERR("BAD MCP validity signature\n");
11624
11625 if (!BP_NOMCP(bp)) {
11626 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11627 & DRV_MSG_SEQ_NUMBER_MASK);
11628 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11629 }
11630}
11631
11632/**
11633 * bnx2x_io_error_detected - called when PCI error is detected
11634 * @pdev: Pointer to PCI device
11635 * @state: The current pci connection state
11636 *
11637 * This function is called after a PCI bus error affecting
11638 * this device has been detected.
11639 */
11640static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11641 pci_channel_state_t state)
11642{
11643 struct net_device *dev = pci_get_drvdata(pdev);
11644 struct bnx2x *bp = netdev_priv(dev);
11645
11646 rtnl_lock();
11647
11648 netif_device_detach(dev);
11649
11650 if (netif_running(dev))
f8ef6e44 11651 bnx2x_eeh_nic_unload(bp);
11652
11653 pci_disable_device(pdev);
11654
11655 rtnl_unlock();
11656
11657 /* Request a slot reset */
11658 return PCI_ERS_RESULT_NEED_RESET;
11659}
11660
11661/**
11662 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11663 * @pdev: Pointer to PCI device
11664 *
11665 * Restart the card from scratch, as if from a cold-boot.
11666 */
11667static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11668{
11669 struct net_device *dev = pci_get_drvdata(pdev);
11670 struct bnx2x *bp = netdev_priv(dev);
11671
11672 rtnl_lock();
11673
11674 if (pci_enable_device(pdev)) {
11675 dev_err(&pdev->dev,
11676 "Cannot re-enable PCI device after reset\n");
11677 rtnl_unlock();
11678 return PCI_ERS_RESULT_DISCONNECT;
11679 }
11680
11681 pci_set_master(pdev);
11682 pci_restore_state(pdev);
11683
11684 if (netif_running(dev))
11685 bnx2x_set_power_state(bp, PCI_D0);
11686
11687 rtnl_unlock();
11688
11689 return PCI_ERS_RESULT_RECOVERED;
11690}
11691
11692/**
11693 * bnx2x_io_resume - called when traffic can start flowing again
11694 * @pdev: Pointer to PCI device
11695 *
11696 * This callback is called when the error recovery driver tells us that
11697 * it's OK to resume normal operation.
11698 */
11699static void bnx2x_io_resume(struct pci_dev *pdev)
11700{
11701 struct net_device *dev = pci_get_drvdata(pdev);
11702 struct bnx2x *bp = netdev_priv(dev);
11703
11704 rtnl_lock();
11705
11706 bnx2x_eeh_recover(bp);
11707
493adb1f 11708 if (netif_running(dev))
f8ef6e44 11709 bnx2x_nic_load(bp, LOAD_NORMAL);
11710
11711 netif_device_attach(dev);
11712
11713 rtnl_unlock();
11714}
11715
11716static struct pci_error_handlers bnx2x_err_handler = {
11717 .error_detected = bnx2x_io_error_detected,
11718 .slot_reset = bnx2x_io_slot_reset,
11719 .resume = bnx2x_io_resume,
11720};
11721
a2fbb9ea 11722static struct pci_driver bnx2x_pci_driver = {
11723 .name = DRV_MODULE_NAME,
11724 .id_table = bnx2x_pci_tbl,
11725 .probe = bnx2x_init_one,
11726 .remove = __devexit_p(bnx2x_remove_one),
11727 .suspend = bnx2x_suspend,
11728 .resume = bnx2x_resume,
11729 .err_handler = &bnx2x_err_handler,
11730};
11731
11732static int __init bnx2x_init(void)
11733{
11734 int ret;
11735
11736 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11737 if (bnx2x_wq == NULL) {
11738 printk(KERN_ERR PFX "Cannot create workqueue\n");
11739 return -ENOMEM;
11740 }
11741
11742 ret = pci_register_driver(&bnx2x_pci_driver);
11743 if (ret) {
11744 printk(KERN_ERR PFX "Cannot register driver\n");
11745 destroy_workqueue(bnx2x_wq);
11746 }
11747 return ret;
11748}
11749
11750static void __exit bnx2x_cleanup(void)
11751{
11752 pci_unregister_driver(&bnx2x_pci_driver);
11753
11754 destroy_workqueue(bnx2x_wq);
11755}
11756
11757module_init(bnx2x_init);
11758module_exit(bnx2x_cleanup);
11759
94a78b79 11760