]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/bnx2x_main.c
bnx2x: Removing old FW files
[net-next-2.6.git] / drivers / net / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
d05c26ce 3 * Copyright (c) 2007-2009 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
ca00392c 13 * Slowpath and fastpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
0c6671b0 41#include <linux/if_vlan.h>
a2fbb9ea
ET
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
34f80b04 45#include <net/ip6_checksum.h>
a2fbb9ea
ET
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
34f80b04 48#include <linux/crc32c.h>
a2fbb9ea
ET
49#include <linux/prefetch.h>
50#include <linux/zlib.h>
a2fbb9ea
ET
51#include <linux/io.h>
52
359d8b15 53
a2fbb9ea
ET
54#include "bnx2x.h"
55#include "bnx2x_init.h"
94a78b79 56#include "bnx2x_init_ops.h"
0a64ea57 57#include "bnx2x_dump.h"
a2fbb9ea 58
573dd788
EG
59#define DRV_MODULE_VERSION "1.48.114-1"
60#define DRV_MODULE_RELDATE "2009/07/29"
34f80b04 61#define BNX2X_BC_VER 0x040200
a2fbb9ea 62
94a78b79
VZ
63#include <linux/firmware.h>
64#include "bnx2x_fw_file_hdr.h"
65/* FW files */
66#define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67#define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
68
34f80b04
EG
69/* Time in jiffies before concluding the transmitter is hung */
70#define TX_TIMEOUT (5*HZ)
a2fbb9ea 71
53a10565 72static char version[] __devinitdata =
34f80b04 73 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
74 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
24e3fcef 76MODULE_AUTHOR("Eliezer Tamir");
e47d7e6e 77MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
a2fbb9ea
ET
78MODULE_LICENSE("GPL");
79MODULE_VERSION(DRV_MODULE_VERSION);
a2fbb9ea 80
555f6c78
EG
81static int multi_mode = 1;
82module_param(multi_mode, int, 0);
ca00392c
EG
83MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))");
85
86static int num_rx_queues;
87module_param(num_rx_queues, int, 0);
88MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89 " (default is half number of CPUs)");
90
91static int num_tx_queues;
92module_param(num_tx_queues, int, 0);
93MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
555f6c78 95
19680c48 96static int disable_tpa;
19680c48 97module_param(disable_tpa, int, 0);
9898f86d 98MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
8badd27a
EG
99
100static int int_mode;
101module_param(int_mode, int, 0);
102MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
103
9898f86d 104static int poll;
a2fbb9ea 105module_param(poll, int, 0);
9898f86d 106MODULE_PARM_DESC(poll, " Use polling (for debug)");
8d5726c4
EG
107
108static int mrrs = -1;
109module_param(mrrs, int, 0);
110MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
111
9898f86d 112static int debug;
a2fbb9ea 113module_param(debug, int, 0);
9898f86d
EG
114MODULE_PARM_DESC(debug, " Default debug msglevel");
115
116static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
a2fbb9ea 117
1cf167f2 118static struct workqueue_struct *bnx2x_wq;
a2fbb9ea
ET
119
120enum bnx2x_board_type {
121 BCM57710 = 0,
34f80b04
EG
122 BCM57711 = 1,
123 BCM57711E = 2,
a2fbb9ea
ET
124};
125
34f80b04 126/* indexed by board_type, above */
53a10565 127static struct {
a2fbb9ea
ET
128 char *name;
129} board_info[] __devinitdata = {
34f80b04
EG
130 { "Broadcom NetXtreme II BCM57710 XGb" },
131 { "Broadcom NetXtreme II BCM57711 XGb" },
132 { "Broadcom NetXtreme II BCM57711E XGb" }
a2fbb9ea
ET
133};
134
34f80b04 135
a2fbb9ea
ET
136static const struct pci_device_id bnx2x_pci_tbl[] = {
137 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
138 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
34f80b04
EG
139 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
141 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
a2fbb9ea
ET
143 { 0 }
144};
145
146MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
147
148/****************************************************************************
149* General service functions
150****************************************************************************/
151
152/* used only at init
153 * locking is done by mcp
154 */
155static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
156{
157 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
159 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
160 PCICFG_VENDOR_ID_OFFSET);
161}
162
a2fbb9ea
ET
163static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
164{
165 u32 val;
166
167 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
168 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
169 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
170 PCICFG_VENDOR_ID_OFFSET);
171
172 return val;
173}
a2fbb9ea
ET
174
175static const u32 dmae_reg_go_c[] = {
176 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
177 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
178 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
179 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
180};
181
182/* copy command into DMAE command memory and set DMAE command go */
183static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
184 int idx)
185{
186 u32 cmd_offset;
187 int i;
188
189 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
190 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
191 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
192
ad8d3948
EG
193 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
194 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
195 }
196 REG_WR(bp, dmae_reg_go_c[idx], 1);
197}
198
ad8d3948
EG
199void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
200 u32 len32)
a2fbb9ea 201{
ad8d3948 202 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 203 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
204 int cnt = 200;
205
206 if (!bp->dmae_ready) {
207 u32 *data = bnx2x_sp(bp, wb_data[0]);
208
209 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
210 " using indirect\n", dst_addr, len32);
211 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
212 return;
213 }
214
215 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
216
217 memset(dmae, 0, sizeof(struct dmae_command));
218
219 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
220 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
221 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
222#ifdef __BIG_ENDIAN
223 DMAE_CMD_ENDIANITY_B_DW_SWAP |
224#else
225 DMAE_CMD_ENDIANITY_DW_SWAP |
226#endif
34f80b04
EG
227 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
228 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
229 dmae->src_addr_lo = U64_LO(dma_addr);
230 dmae->src_addr_hi = U64_HI(dma_addr);
231 dmae->dst_addr_lo = dst_addr >> 2;
232 dmae->dst_addr_hi = 0;
233 dmae->len = len32;
234 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
235 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 236 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 237
c3eefaf6 238 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
239 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
240 "dst_addr [%x:%08x (%08x)]\n"
241 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
242 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
243 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
244 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
ad8d3948 245 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
246 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
247 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea
ET
248
249 *wb_comp = 0;
250
34f80b04 251 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
252
253 udelay(5);
ad8d3948
EG
254
255 while (*wb_comp != DMAE_COMP_VAL) {
256 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
257
ad8d3948 258 if (!cnt) {
c3eefaf6 259 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
260 break;
261 }
ad8d3948 262 cnt--;
12469401
YG
263 /* adjust delay for emulation/FPGA */
264 if (CHIP_REV_IS_SLOW(bp))
265 msleep(100);
266 else
267 udelay(5);
a2fbb9ea 268 }
ad8d3948
EG
269
270 mutex_unlock(&bp->dmae_mutex);
a2fbb9ea
ET
271}
272
c18487ee 273void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 274{
ad8d3948 275 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 276 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
277 int cnt = 200;
278
279 if (!bp->dmae_ready) {
280 u32 *data = bnx2x_sp(bp, wb_data[0]);
281 int i;
282
283 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
284 " using indirect\n", src_addr, len32);
285 for (i = 0; i < len32; i++)
286 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
287 return;
288 }
289
290 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
291
292 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
293 memset(dmae, 0, sizeof(struct dmae_command));
294
295 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
296 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
297 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
298#ifdef __BIG_ENDIAN
299 DMAE_CMD_ENDIANITY_B_DW_SWAP |
300#else
301 DMAE_CMD_ENDIANITY_DW_SWAP |
302#endif
34f80b04
EG
303 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
304 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
305 dmae->src_addr_lo = src_addr >> 2;
306 dmae->src_addr_hi = 0;
307 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
308 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
309 dmae->len = len32;
310 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
311 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 312 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 313
c3eefaf6 314 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
315 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
316 "dst_addr [%x:%08x (%08x)]\n"
317 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
318 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
319 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
320 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
a2fbb9ea
ET
321
322 *wb_comp = 0;
323
34f80b04 324 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
325
326 udelay(5);
ad8d3948
EG
327
328 while (*wb_comp != DMAE_COMP_VAL) {
329
ad8d3948 330 if (!cnt) {
c3eefaf6 331 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
332 break;
333 }
ad8d3948 334 cnt--;
12469401
YG
335 /* adjust delay for emulation/FPGA */
336 if (CHIP_REV_IS_SLOW(bp))
337 msleep(100);
338 else
339 udelay(5);
a2fbb9ea 340 }
ad8d3948 341 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
342 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
343 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948
EG
344
345 mutex_unlock(&bp->dmae_mutex);
346}
347
348/* used only for slowpath so not inlined */
349static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
350{
351 u32 wb_write[2];
352
353 wb_write[0] = val_hi;
354 wb_write[1] = val_lo;
355 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 356}
a2fbb9ea 357
ad8d3948
EG
358#ifdef USE_WB_RD
359static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
360{
361 u32 wb_data[2];
362
363 REG_RD_DMAE(bp, reg, wb_data, 2);
364
365 return HILO_U64(wb_data[0], wb_data[1]);
366}
367#endif
368
a2fbb9ea
ET
369static int bnx2x_mc_assert(struct bnx2x *bp)
370{
a2fbb9ea 371 char last_idx;
34f80b04
EG
372 int i, rc = 0;
373 u32 row0, row1, row2, row3;
374
375 /* XSTORM */
376 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
377 XSTORM_ASSERT_LIST_INDEX_OFFSET);
378 if (last_idx)
379 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
380
381 /* print the asserts */
382 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
383
384 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
385 XSTORM_ASSERT_LIST_OFFSET(i));
386 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
387 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
388 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
389 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
390 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
391 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
392
393 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
394 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
395 " 0x%08x 0x%08x 0x%08x\n",
396 i, row3, row2, row1, row0);
397 rc++;
398 } else {
399 break;
400 }
401 }
402
403 /* TSTORM */
404 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
405 TSTORM_ASSERT_LIST_INDEX_OFFSET);
406 if (last_idx)
407 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
408
409 /* print the asserts */
410 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
411
412 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
413 TSTORM_ASSERT_LIST_OFFSET(i));
414 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
415 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
416 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
417 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
418 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
419 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
420
421 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
422 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
423 " 0x%08x 0x%08x 0x%08x\n",
424 i, row3, row2, row1, row0);
425 rc++;
426 } else {
427 break;
428 }
429 }
430
431 /* CSTORM */
432 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
433 CSTORM_ASSERT_LIST_INDEX_OFFSET);
434 if (last_idx)
435 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
436
437 /* print the asserts */
438 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
439
440 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
441 CSTORM_ASSERT_LIST_OFFSET(i));
442 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
443 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
444 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
445 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
446 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
447 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
448
449 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
450 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
451 " 0x%08x 0x%08x 0x%08x\n",
452 i, row3, row2, row1, row0);
453 rc++;
454 } else {
455 break;
456 }
457 }
458
459 /* USTORM */
460 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
461 USTORM_ASSERT_LIST_INDEX_OFFSET);
462 if (last_idx)
463 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
464
465 /* print the asserts */
466 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
467
468 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
469 USTORM_ASSERT_LIST_OFFSET(i));
470 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
471 USTORM_ASSERT_LIST_OFFSET(i) + 4);
472 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
473 USTORM_ASSERT_LIST_OFFSET(i) + 8);
474 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
475 USTORM_ASSERT_LIST_OFFSET(i) + 12);
476
477 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
478 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
479 " 0x%08x 0x%08x 0x%08x\n",
480 i, row3, row2, row1, row0);
481 rc++;
482 } else {
483 break;
a2fbb9ea
ET
484 }
485 }
34f80b04 486
a2fbb9ea
ET
487 return rc;
488}
c14423fe 489
a2fbb9ea
ET
490static void bnx2x_fw_dump(struct bnx2x *bp)
491{
492 u32 mark, offset;
4781bfad 493 __be32 data[9];
a2fbb9ea
ET
494 int word;
495
496 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
49d66772 497 mark = ((mark + 0x3) & ~0x3);
ad361c98 498 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
a2fbb9ea 499
ad361c98 500 printk(KERN_ERR PFX);
a2fbb9ea
ET
501 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
502 for (word = 0; word < 8; word++)
503 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
504 offset + 4*word));
505 data[8] = 0x0;
49d66772 506 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
507 }
508 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
509 for (word = 0; word < 8; word++)
510 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
511 offset + 4*word));
512 data[8] = 0x0;
49d66772 513 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea 514 }
ad361c98 515 printk(KERN_ERR PFX "end of fw dump\n");
a2fbb9ea
ET
516}
517
518static void bnx2x_panic_dump(struct bnx2x *bp)
519{
520 int i;
521 u16 j, start, end;
522
66e855f3
YG
523 bp->stats_state = STATS_STATE_DISABLED;
524 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
525
a2fbb9ea
ET
526 BNX2X_ERR("begin crash dump -----------------\n");
527
8440d2b6
EG
528 /* Indices */
529 /* Common */
530 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
531 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
532 " spq_prod_idx(%u)\n",
533 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
534 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
535
536 /* Rx */
537 for_each_rx_queue(bp, i) {
a2fbb9ea 538 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 539
c3eefaf6 540 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
66e855f3
YG
541 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
542 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
8440d2b6 543 i, fp->rx_bd_prod, fp->rx_bd_cons,
66e855f3
YG
544 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
545 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
c3eefaf6 546 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
8440d2b6
EG
547 " fp_u_idx(%x) *sb_u_idx(%x)\n",
548 fp->rx_sge_prod, fp->last_max_sge,
549 le16_to_cpu(fp->fp_u_idx),
550 fp->status_blk->u_status_block.status_block_index);
551 }
a2fbb9ea 552
8440d2b6
EG
553 /* Tx */
554 for_each_tx_queue(bp, i) {
555 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 556
c3eefaf6 557 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
8440d2b6
EG
558 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
559 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
560 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
c3eefaf6 561 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
ca00392c 562 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
8440d2b6 563 fp->status_blk->c_status_block.status_block_index,
ca00392c 564 fp->tx_db.data.prod);
8440d2b6 565 }
a2fbb9ea 566
8440d2b6
EG
567 /* Rings */
568 /* Rx */
569 for_each_rx_queue(bp, i) {
570 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea
ET
571
572 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
573 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
8440d2b6 574 for (j = start; j != end; j = RX_BD(j + 1)) {
a2fbb9ea
ET
575 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
576 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
577
c3eefaf6
EG
578 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
579 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
580 }
581
3196a88a
EG
582 start = RX_SGE(fp->rx_sge_prod);
583 end = RX_SGE(fp->last_max_sge);
8440d2b6 584 for (j = start; j != end; j = RX_SGE(j + 1)) {
7a9b2557
VZ
585 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
586 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
587
c3eefaf6
EG
588 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
589 i, j, rx_sge[1], rx_sge[0], sw_page->page);
7a9b2557
VZ
590 }
591
a2fbb9ea
ET
592 start = RCQ_BD(fp->rx_comp_cons - 10);
593 end = RCQ_BD(fp->rx_comp_cons + 503);
8440d2b6 594 for (j = start; j != end; j = RCQ_BD(j + 1)) {
a2fbb9ea
ET
595 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
596
c3eefaf6
EG
597 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
598 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
a2fbb9ea
ET
599 }
600 }
601
8440d2b6
EG
602 /* Tx */
603 for_each_tx_queue(bp, i) {
604 struct bnx2x_fastpath *fp = &bp->fp[i];
605
606 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
607 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
608 for (j = start; j != end; j = TX_BD(j + 1)) {
609 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
610
c3eefaf6
EG
611 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
612 i, j, sw_bd->skb, sw_bd->first_bd);
8440d2b6
EG
613 }
614
615 start = TX_BD(fp->tx_bd_cons - 10);
616 end = TX_BD(fp->tx_bd_cons + 254);
617 for (j = start; j != end; j = TX_BD(j + 1)) {
618 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
619
c3eefaf6
EG
620 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
621 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
8440d2b6
EG
622 }
623 }
a2fbb9ea 624
34f80b04 625 bnx2x_fw_dump(bp);
a2fbb9ea
ET
626 bnx2x_mc_assert(bp);
627 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
628}
629
615f8fd9 630static void bnx2x_int_enable(struct bnx2x *bp)
a2fbb9ea 631{
34f80b04 632 int port = BP_PORT(bp);
a2fbb9ea
ET
633 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
634 u32 val = REG_RD(bp, addr);
635 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 636 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
a2fbb9ea
ET
637
638 if (msix) {
8badd27a
EG
639 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
640 HC_CONFIG_0_REG_INT_LINE_EN_0);
a2fbb9ea
ET
641 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
642 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
8badd27a
EG
643 } else if (msi) {
644 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
645 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
646 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
647 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
a2fbb9ea
ET
648 } else {
649 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 650 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
651 HC_CONFIG_0_REG_INT_LINE_EN_0 |
652 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 653
8badd27a
EG
654 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
655 val, port, addr);
615f8fd9
ET
656
657 REG_WR(bp, addr, val);
658
a2fbb9ea
ET
659 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
660 }
661
8badd27a
EG
662 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
663 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
a2fbb9ea
ET
664
665 REG_WR(bp, addr, val);
37dbbf32
EG
666 /*
667 * Ensure that HC_CONFIG is written before leading/trailing edge config
668 */
669 mmiowb();
670 barrier();
34f80b04
EG
671
672 if (CHIP_IS_E1H(bp)) {
673 /* init leading/trailing edge */
674 if (IS_E1HMF(bp)) {
8badd27a 675 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
34f80b04 676 if (bp->port.pmf)
4acac6a5
EG
677 /* enable nig and gpio3 attention */
678 val |= 0x1100;
34f80b04
EG
679 } else
680 val = 0xffff;
681
682 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
683 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
684 }
37dbbf32
EG
685
686 /* Make sure that interrupts are indeed enabled from here on */
687 mmiowb();
a2fbb9ea
ET
688}
689
615f8fd9 690static void bnx2x_int_disable(struct bnx2x *bp)
a2fbb9ea 691{
34f80b04 692 int port = BP_PORT(bp);
a2fbb9ea
ET
693 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
694 u32 val = REG_RD(bp, addr);
695
696 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
697 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
698 HC_CONFIG_0_REG_INT_LINE_EN_0 |
699 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
700
701 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
702 val, port, addr);
703
8badd27a
EG
704 /* flush all outstanding writes */
705 mmiowb();
706
a2fbb9ea
ET
707 REG_WR(bp, addr, val);
708 if (REG_RD(bp, addr) != val)
709 BNX2X_ERR("BUG! proper val not read from IGU!\n");
356e2385 710
a2fbb9ea
ET
711}
712
f8ef6e44 713static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
a2fbb9ea 714{
a2fbb9ea 715 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 716 int i, offset;
a2fbb9ea 717
34f80b04 718 /* disable interrupt handling */
a2fbb9ea 719 atomic_inc(&bp->intr_sem);
e1510706
EG
720 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
721
f8ef6e44
YG
722 if (disable_hw)
723 /* prevent the HW from sending interrupts */
724 bnx2x_int_disable(bp);
a2fbb9ea
ET
725
726 /* make sure all ISRs are done */
727 if (msix) {
8badd27a
EG
728 synchronize_irq(bp->msix_table[0].vector);
729 offset = 1;
a2fbb9ea 730 for_each_queue(bp, i)
8badd27a 731 synchronize_irq(bp->msix_table[i + offset].vector);
a2fbb9ea
ET
732 } else
733 synchronize_irq(bp->pdev->irq);
734
735 /* make sure sp_task is not running */
1cf167f2
EG
736 cancel_delayed_work(&bp->sp_task);
737 flush_workqueue(bnx2x_wq);
a2fbb9ea
ET
738}
739
34f80b04 740/* fast path */
a2fbb9ea
ET
741
742/*
34f80b04 743 * General service functions
a2fbb9ea
ET
744 */
745
34f80b04 746static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
a2fbb9ea
ET
747 u8 storm, u16 index, u8 op, u8 update)
748{
5c862848
EG
749 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
750 COMMAND_REG_INT_ACK);
a2fbb9ea
ET
751 struct igu_ack_register igu_ack;
752
753 igu_ack.status_block_index = index;
754 igu_ack.sb_id_and_flags =
34f80b04 755 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
a2fbb9ea
ET
756 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
757 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
758 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
759
5c862848
EG
760 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
761 (*(u32 *)&igu_ack), hc_addr);
762 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
37dbbf32
EG
763
764 /* Make sure that ACK is written */
765 mmiowb();
766 barrier();
a2fbb9ea
ET
767}
768
769static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
770{
771 struct host_status_block *fpsb = fp->status_blk;
772 u16 rc = 0;
773
774 barrier(); /* status block is written to by the chip */
775 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
776 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
777 rc |= 1;
778 }
779 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
780 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
781 rc |= 2;
782 }
783 return rc;
784}
785
a2fbb9ea
ET
786static u16 bnx2x_ack_int(struct bnx2x *bp)
787{
5c862848
EG
788 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
789 COMMAND_REG_SIMD_MASK);
790 u32 result = REG_RD(bp, hc_addr);
a2fbb9ea 791
5c862848
EG
792 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
793 result, hc_addr);
a2fbb9ea 794
a2fbb9ea
ET
795 return result;
796}
797
798
799/*
800 * fast path service functions
801 */
802
e8b5fc51
VZ
803static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
804{
805 /* Tell compiler that consumer and producer can change */
806 barrier();
807 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
237907c1
EG
808}
809
a2fbb9ea
ET
810/* free skb in the packet ring at pos idx
811 * return idx of last bd freed
812 */
813static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
814 u16 idx)
815{
816 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
ca00392c
EG
817 struct eth_tx_start_bd *tx_start_bd;
818 struct eth_tx_bd *tx_data_bd;
a2fbb9ea 819 struct sk_buff *skb = tx_buf->skb;
34f80b04 820 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
821 int nbd;
822
823 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
824 idx, tx_buf, skb);
825
826 /* unmap first bd */
827 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
ca00392c
EG
828 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
829 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
830 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
a2fbb9ea 831
ca00392c 832 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
a2fbb9ea 833#ifdef BNX2X_STOP_ON_ERROR
ca00392c 834 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
34f80b04 835 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
836 bnx2x_panic();
837 }
838#endif
ca00392c 839 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea 840
ca00392c
EG
841 /* Get the next bd */
842 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
a2fbb9ea 843
ca00392c
EG
844 /* Skip a parse bd... */
845 --nbd;
846 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
847
848 /* ...and the TSO split header bd since they have no mapping */
849 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
850 --nbd;
851 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
a2fbb9ea
ET
852 }
853
854 /* now free frags */
855 while (nbd > 0) {
856
857 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
ca00392c
EG
858 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
859 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
860 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
a2fbb9ea
ET
861 if (--nbd)
862 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
863 }
864
865 /* release skb */
53e5e96e 866 WARN_ON(!skb);
ca00392c 867 dev_kfree_skb_any(skb);
a2fbb9ea
ET
868 tx_buf->first_bd = 0;
869 tx_buf->skb = NULL;
870
34f80b04 871 return new_cons;
a2fbb9ea
ET
872}
873
34f80b04 874static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
a2fbb9ea 875{
34f80b04
EG
876 s16 used;
877 u16 prod;
878 u16 cons;
a2fbb9ea 879
34f80b04 880 barrier(); /* Tell compiler that prod and cons can change */
a2fbb9ea
ET
881 prod = fp->tx_bd_prod;
882 cons = fp->tx_bd_cons;
883
34f80b04
EG
884 /* NUM_TX_RINGS = number of "next-page" entries
885 It will be used as a threshold */
886 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
a2fbb9ea 887
34f80b04 888#ifdef BNX2X_STOP_ON_ERROR
53e5e96e
IJ
889 WARN_ON(used < 0);
890 WARN_ON(used > fp->bp->tx_ring_size);
891 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
34f80b04 892#endif
a2fbb9ea 893
34f80b04 894 return (s16)(fp->bp->tx_ring_size) - used;
a2fbb9ea
ET
895}
896
7961f791 897static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
a2fbb9ea
ET
898{
899 struct bnx2x *bp = fp->bp;
555f6c78 900 struct netdev_queue *txq;
a2fbb9ea
ET
901 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
902 int done = 0;
903
904#ifdef BNX2X_STOP_ON_ERROR
905 if (unlikely(bp->panic))
906 return;
907#endif
908
ca00392c 909 txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
a2fbb9ea
ET
910 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
911 sw_cons = fp->tx_pkt_cons;
912
913 while (sw_cons != hw_cons) {
914 u16 pkt_cons;
915
916 pkt_cons = TX_BD(sw_cons);
917
918 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
919
34f80b04 920 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
a2fbb9ea
ET
921 hw_cons, sw_cons, pkt_cons);
922
34f80b04 923/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
a2fbb9ea
ET
924 rmb();
925 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
926 }
927*/
928 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
929 sw_cons++;
930 done++;
a2fbb9ea
ET
931 }
932
933 fp->tx_pkt_cons = sw_cons;
934 fp->tx_bd_cons = bd_cons;
935
a2fbb9ea 936 /* TBD need a thresh? */
555f6c78 937 if (unlikely(netif_tx_queue_stopped(txq))) {
a2fbb9ea 938
6044735d
EG
939 /* Need to make the tx_bd_cons update visible to start_xmit()
940 * before checking for netif_tx_queue_stopped(). Without the
941 * memory barrier, there is a small possibility that
942 * start_xmit() will miss it and cause the queue to be stopped
943 * forever.
944 */
945 smp_mb();
946
555f6c78 947 if ((netif_tx_queue_stopped(txq)) &&
da5a662a 948 (bp->state == BNX2X_STATE_OPEN) &&
a2fbb9ea 949 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
555f6c78 950 netif_tx_wake_queue(txq);
a2fbb9ea
ET
951 }
952}
953
3196a88a 954
a2fbb9ea
ET
955static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
956 union eth_rx_cqe *rr_cqe)
957{
958 struct bnx2x *bp = fp->bp;
959 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
960 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
961
34f80b04 962 DP(BNX2X_MSG_SP,
a2fbb9ea 963 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
0626b899 964 fp->index, cid, command, bp->state,
34f80b04 965 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
966
967 bp->spq_left++;
968
0626b899 969 if (fp->index) {
a2fbb9ea
ET
970 switch (command | fp->state) {
971 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
972 BNX2X_FP_STATE_OPENING):
973 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
974 cid);
975 fp->state = BNX2X_FP_STATE_OPEN;
976 break;
977
978 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
979 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
980 cid);
981 fp->state = BNX2X_FP_STATE_HALTED;
982 break;
983
984 default:
34f80b04
EG
985 BNX2X_ERR("unexpected MC reply (%d) "
986 "fp->state is %x\n", command, fp->state);
987 break;
a2fbb9ea 988 }
34f80b04 989 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
990 return;
991 }
c14423fe 992
a2fbb9ea
ET
993 switch (command | bp->state) {
994 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
995 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
996 bp->state = BNX2X_STATE_OPEN;
997 break;
998
999 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1000 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1001 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1002 fp->state = BNX2X_FP_STATE_HALTED;
1003 break;
1004
a2fbb9ea 1005 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 1006 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 1007 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
1008 break;
1009
3196a88a 1010
a2fbb9ea 1011 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 1012 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 1013 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
bb2a0f7a 1014 bp->set_mac_pending = 0;
a2fbb9ea
ET
1015 break;
1016
49d66772 1017 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
ca00392c 1018 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
34f80b04 1019 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
49d66772
ET
1020 break;
1021
a2fbb9ea 1022 default:
34f80b04 1023 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 1024 command, bp->state);
34f80b04 1025 break;
a2fbb9ea 1026 }
34f80b04 1027 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
1028}
1029
7a9b2557
VZ
1030static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1031 struct bnx2x_fastpath *fp, u16 index)
1032{
1033 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1034 struct page *page = sw_buf->page;
1035 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1036
1037 /* Skip "next page" elements */
1038 if (!page)
1039 return;
1040
1041 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
4f40f2cb 1042 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1043 __free_pages(page, PAGES_PER_SGE_SHIFT);
1044
1045 sw_buf->page = NULL;
1046 sge->addr_hi = 0;
1047 sge->addr_lo = 0;
1048}
1049
1050static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1051 struct bnx2x_fastpath *fp, int last)
1052{
1053 int i;
1054
1055 for (i = 0; i < last; i++)
1056 bnx2x_free_rx_sge(bp, fp, i);
1057}
1058
1059static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1060 struct bnx2x_fastpath *fp, u16 index)
1061{
1062 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1063 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1064 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1065 dma_addr_t mapping;
1066
1067 if (unlikely(page == NULL))
1068 return -ENOMEM;
1069
4f40f2cb 1070 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
7a9b2557 1071 PCI_DMA_FROMDEVICE);
8d8bb39b 1072 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
7a9b2557
VZ
1073 __free_pages(page, PAGES_PER_SGE_SHIFT);
1074 return -ENOMEM;
1075 }
1076
1077 sw_buf->page = page;
1078 pci_unmap_addr_set(sw_buf, mapping, mapping);
1079
1080 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1081 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1082
1083 return 0;
1084}
1085
a2fbb9ea
ET
1086static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1087 struct bnx2x_fastpath *fp, u16 index)
1088{
1089 struct sk_buff *skb;
1090 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1091 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1092 dma_addr_t mapping;
1093
1094 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1095 if (unlikely(skb == NULL))
1096 return -ENOMEM;
1097
437cf2f1 1098 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
a2fbb9ea 1099 PCI_DMA_FROMDEVICE);
8d8bb39b 1100 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
a2fbb9ea
ET
1101 dev_kfree_skb(skb);
1102 return -ENOMEM;
1103 }
1104
1105 rx_buf->skb = skb;
1106 pci_unmap_addr_set(rx_buf, mapping, mapping);
1107
1108 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1109 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1110
1111 return 0;
1112}
1113
1114/* note that we are not allocating a new skb,
1115 * we are just moving one from cons to prod
1116 * we are not creating a new mapping,
1117 * so there is no need to check for dma_mapping_error().
1118 */
1119static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1120 struct sk_buff *skb, u16 cons, u16 prod)
1121{
1122 struct bnx2x *bp = fp->bp;
1123 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1124 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1125 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1126 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1127
1128 pci_dma_sync_single_for_device(bp->pdev,
1129 pci_unmap_addr(cons_rx_buf, mapping),
87942b46 1130 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
1131
1132 prod_rx_buf->skb = cons_rx_buf->skb;
1133 pci_unmap_addr_set(prod_rx_buf, mapping,
1134 pci_unmap_addr(cons_rx_buf, mapping));
1135 *prod_bd = *cons_bd;
1136}
1137
7a9b2557
VZ
1138static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1139 u16 idx)
1140{
1141 u16 last_max = fp->last_max_sge;
1142
1143 if (SUB_S16(idx, last_max) > 0)
1144 fp->last_max_sge = idx;
1145}
1146
1147static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1148{
1149 int i, j;
1150
1151 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1152 int idx = RX_SGE_CNT * i - 1;
1153
1154 for (j = 0; j < 2; j++) {
1155 SGE_MASK_CLEAR_BIT(fp, idx);
1156 idx--;
1157 }
1158 }
1159}
1160
1161static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1162 struct eth_fast_path_rx_cqe *fp_cqe)
1163{
1164 struct bnx2x *bp = fp->bp;
4f40f2cb 1165 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
7a9b2557 1166 le16_to_cpu(fp_cqe->len_on_bd)) >>
4f40f2cb 1167 SGE_PAGE_SHIFT;
7a9b2557
VZ
1168 u16 last_max, last_elem, first_elem;
1169 u16 delta = 0;
1170 u16 i;
1171
1172 if (!sge_len)
1173 return;
1174
1175 /* First mark all used pages */
1176 for (i = 0; i < sge_len; i++)
1177 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1178
1179 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1180 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1181
1182 /* Here we assume that the last SGE index is the biggest */
1183 prefetch((void *)(fp->sge_mask));
1184 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1185
1186 last_max = RX_SGE(fp->last_max_sge);
1187 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1188 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1189
1190 /* If ring is not full */
1191 if (last_elem + 1 != first_elem)
1192 last_elem++;
1193
1194 /* Now update the prod */
1195 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1196 if (likely(fp->sge_mask[i]))
1197 break;
1198
1199 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1200 delta += RX_SGE_MASK_ELEM_SZ;
1201 }
1202
1203 if (delta > 0) {
1204 fp->rx_sge_prod += delta;
1205 /* clear page-end entries */
1206 bnx2x_clear_sge_mask_next_elems(fp);
1207 }
1208
1209 DP(NETIF_MSG_RX_STATUS,
1210 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1211 fp->last_max_sge, fp->rx_sge_prod);
1212}
1213
1214static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1215{
1216 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1217 memset(fp->sge_mask, 0xff,
1218 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1219
33471629
EG
1220 /* Clear the two last indices in the page to 1:
1221 these are the indices that correspond to the "next" element,
7a9b2557
VZ
1222 hence will never be indicated and should be removed from
1223 the calculations. */
1224 bnx2x_clear_sge_mask_next_elems(fp);
1225}
1226
1227static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1228 struct sk_buff *skb, u16 cons, u16 prod)
1229{
1230 struct bnx2x *bp = fp->bp;
1231 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1232 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1233 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1234 dma_addr_t mapping;
1235
1236 /* move empty skb from pool to prod and map it */
1237 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1238 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
437cf2f1 1239 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1240 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1241
1242 /* move partial skb from cons to pool (don't unmap yet) */
1243 fp->tpa_pool[queue] = *cons_rx_buf;
1244
1245 /* mark bin state as start - print error if current state != stop */
1246 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1247 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1248
1249 fp->tpa_state[queue] = BNX2X_TPA_START;
1250
1251 /* point prod_bd to new skb */
1252 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1253 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1254
1255#ifdef BNX2X_STOP_ON_ERROR
1256 fp->tpa_queue_used |= (1 << queue);
1257#ifdef __powerpc64__
1258 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1259#else
1260 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1261#endif
1262 fp->tpa_queue_used);
1263#endif
1264}
1265
1266static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1267 struct sk_buff *skb,
1268 struct eth_fast_path_rx_cqe *fp_cqe,
1269 u16 cqe_idx)
1270{
1271 struct sw_rx_page *rx_pg, old_rx_pg;
7a9b2557
VZ
1272 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1273 u32 i, frag_len, frag_size, pages;
1274 int err;
1275 int j;
1276
1277 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
4f40f2cb 1278 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
1279
1280 /* This is needed in order to enable forwarding support */
1281 if (frag_size)
4f40f2cb 1282 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
7a9b2557
VZ
1283 max(frag_size, (u32)len_on_bd));
1284
1285#ifdef BNX2X_STOP_ON_ERROR
4f40f2cb
EG
1286 if (pages >
1287 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
7a9b2557
VZ
1288 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1289 pages, cqe_idx);
1290 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1291 fp_cqe->pkt_len, len_on_bd);
1292 bnx2x_panic();
1293 return -EINVAL;
1294 }
1295#endif
1296
1297 /* Run through the SGL and compose the fragmented skb */
1298 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1299 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1300
1301 /* FW gives the indices of the SGE as if the ring is an array
1302 (meaning that "next" element will consume 2 indices) */
4f40f2cb 1303 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
7a9b2557 1304 rx_pg = &fp->rx_page_ring[sge_idx];
7a9b2557
VZ
1305 old_rx_pg = *rx_pg;
1306
1307 /* If we fail to allocate a substitute page, we simply stop
1308 where we are and drop the whole packet */
1309 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1310 if (unlikely(err)) {
de832a55 1311 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1312 return err;
1313 }
1314
1315 /* Unmap the page as we r going to pass it to the stack */
1316 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
4f40f2cb 1317 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1318
1319 /* Add one frag and update the appropriate fields in the skb */
1320 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1321
1322 skb->data_len += frag_len;
1323 skb->truesize += frag_len;
1324 skb->len += frag_len;
1325
1326 frag_size -= frag_len;
1327 }
1328
1329 return 0;
1330}
1331
1332static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1333 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1334 u16 cqe_idx)
1335{
1336 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1337 struct sk_buff *skb = rx_buf->skb;
1338 /* alloc new skb */
1339 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1340
1341 /* Unmap skb in the pool anyway, as we are going to change
1342 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1343 fails. */
1344 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
437cf2f1 1345 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557 1346
7a9b2557 1347 if (likely(new_skb)) {
66e855f3
YG
1348 /* fix ip xsum and give it to the stack */
1349 /* (no need to map the new skb) */
0c6671b0
EG
1350#ifdef BCM_VLAN
1351 int is_vlan_cqe =
1352 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1353 PARSING_FLAGS_VLAN);
1354 int is_not_hwaccel_vlan_cqe =
1355 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1356#endif
7a9b2557
VZ
1357
1358 prefetch(skb);
1359 prefetch(((char *)(skb)) + 128);
1360
7a9b2557
VZ
1361#ifdef BNX2X_STOP_ON_ERROR
1362 if (pad + len > bp->rx_buf_size) {
1363 BNX2X_ERR("skb_put is about to fail... "
1364 "pad %d len %d rx_buf_size %d\n",
1365 pad, len, bp->rx_buf_size);
1366 bnx2x_panic();
1367 return;
1368 }
1369#endif
1370
1371 skb_reserve(skb, pad);
1372 skb_put(skb, len);
1373
1374 skb->protocol = eth_type_trans(skb, bp->dev);
1375 skb->ip_summed = CHECKSUM_UNNECESSARY;
1376
1377 {
1378 struct iphdr *iph;
1379
1380 iph = (struct iphdr *)skb->data;
0c6671b0
EG
1381#ifdef BCM_VLAN
1382 /* If there is no Rx VLAN offloading -
1383 take VLAN tag into an account */
1384 if (unlikely(is_not_hwaccel_vlan_cqe))
1385 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1386#endif
7a9b2557
VZ
1387 iph->check = 0;
1388 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1389 }
1390
1391 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1392 &cqe->fast_path_cqe, cqe_idx)) {
1393#ifdef BCM_VLAN
0c6671b0
EG
1394 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1395 (!is_not_hwaccel_vlan_cqe))
7a9b2557
VZ
1396 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1397 le16_to_cpu(cqe->fast_path_cqe.
1398 vlan_tag));
1399 else
1400#endif
1401 netif_receive_skb(skb);
1402 } else {
1403 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1404 " - dropping packet!\n");
1405 dev_kfree_skb(skb);
1406 }
1407
7a9b2557
VZ
1408
1409 /* put new skb in bin */
1410 fp->tpa_pool[queue].skb = new_skb;
1411
1412 } else {
66e855f3 1413 /* else drop the packet and keep the buffer in the bin */
7a9b2557
VZ
1414 DP(NETIF_MSG_RX_STATUS,
1415 "Failed to allocate new skb - dropping packet!\n");
de832a55 1416 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1417 }
1418
1419 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1420}
1421
1422static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1423 struct bnx2x_fastpath *fp,
1424 u16 bd_prod, u16 rx_comp_prod,
1425 u16 rx_sge_prod)
1426{
8d9c5f34 1427 struct ustorm_eth_rx_producers rx_prods = {0};
7a9b2557
VZ
1428 int i;
1429
1430 /* Update producers */
1431 rx_prods.bd_prod = bd_prod;
1432 rx_prods.cqe_prod = rx_comp_prod;
1433 rx_prods.sge_prod = rx_sge_prod;
1434
58f4c4cf
EG
1435 /*
1436 * Make sure that the BD and SGE data is updated before updating the
1437 * producers since FW might read the BD/SGE right after the producer
1438 * is updated.
1439 * This is only applicable for weak-ordered memory model archs such
1440 * as IA-64. The following barrier is also mandatory since FW will
1441 * assumes BDs must have buffers.
1442 */
1443 wmb();
1444
8d9c5f34
EG
1445 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1446 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 1447 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
7a9b2557
VZ
1448 ((u32 *)&rx_prods)[i]);
1449
58f4c4cf
EG
1450 mmiowb(); /* keep prod updates ordered */
1451
7a9b2557 1452 DP(NETIF_MSG_RX_STATUS,
555f6c78
EG
1453 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1454 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
7a9b2557
VZ
1455}
1456
a2fbb9ea
ET
1457static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1458{
1459 struct bnx2x *bp = fp->bp;
34f80b04 1460 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1461 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1462 int rx_pkt = 0;
1463
1464#ifdef BNX2X_STOP_ON_ERROR
1465 if (unlikely(bp->panic))
1466 return 0;
1467#endif
1468
34f80b04
EG
1469 /* CQ "next element" is of the size of the regular element,
1470 that's why it's ok here */
a2fbb9ea
ET
1471 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1472 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1473 hw_comp_cons++;
1474
1475 bd_cons = fp->rx_bd_cons;
1476 bd_prod = fp->rx_bd_prod;
34f80b04 1477 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1478 sw_comp_cons = fp->rx_comp_cons;
1479 sw_comp_prod = fp->rx_comp_prod;
1480
1481 /* Memory barrier necessary as speculative reads of the rx
1482 * buffer can be ahead of the index in the status block
1483 */
1484 rmb();
1485
1486 DP(NETIF_MSG_RX_STATUS,
1487 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
0626b899 1488 fp->index, hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1489
1490 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1491 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1492 struct sk_buff *skb;
1493 union eth_rx_cqe *cqe;
34f80b04
EG
1494 u8 cqe_fp_flags;
1495 u16 len, pad;
a2fbb9ea
ET
1496
1497 comp_ring_cons = RCQ_BD(sw_comp_cons);
1498 bd_prod = RX_BD(bd_prod);
1499 bd_cons = RX_BD(bd_cons);
1500
1501 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1502 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1503
a2fbb9ea 1504 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1505 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1506 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1507 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1508 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1509 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1510
1511 /* is this a slowpath msg? */
34f80b04 1512 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1513 bnx2x_sp_event(fp, cqe);
1514 goto next_cqe;
1515
1516 /* this is an rx packet */
1517 } else {
1518 rx_buf = &fp->rx_buf_ring[bd_cons];
1519 skb = rx_buf->skb;
a2fbb9ea
ET
1520 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1521 pad = cqe->fast_path_cqe.placement_offset;
1522
7a9b2557
VZ
1523 /* If CQE is marked both TPA_START and TPA_END
1524 it is a non-TPA CQE */
1525 if ((!fp->disable_tpa) &&
1526 (TPA_TYPE(cqe_fp_flags) !=
1527 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1528 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1529
1530 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1531 DP(NETIF_MSG_RX_STATUS,
1532 "calling tpa_start on queue %d\n",
1533 queue);
1534
1535 bnx2x_tpa_start(fp, queue, skb,
1536 bd_cons, bd_prod);
1537 goto next_rx;
1538 }
1539
1540 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1541 DP(NETIF_MSG_RX_STATUS,
1542 "calling tpa_stop on queue %d\n",
1543 queue);
1544
1545 if (!BNX2X_RX_SUM_FIX(cqe))
1546 BNX2X_ERR("STOP on none TCP "
1547 "data\n");
1548
1549 /* This is a size of the linear data
1550 on this skb */
1551 len = le16_to_cpu(cqe->fast_path_cqe.
1552 len_on_bd);
1553 bnx2x_tpa_stop(bp, fp, queue, pad,
1554 len, cqe, comp_ring_cons);
1555#ifdef BNX2X_STOP_ON_ERROR
1556 if (bp->panic)
17cb4006 1557 return 0;
7a9b2557
VZ
1558#endif
1559
1560 bnx2x_update_sge_prod(fp,
1561 &cqe->fast_path_cqe);
1562 goto next_cqe;
1563 }
1564 }
1565
a2fbb9ea
ET
1566 pci_dma_sync_single_for_device(bp->pdev,
1567 pci_unmap_addr(rx_buf, mapping),
1568 pad + RX_COPY_THRESH,
1569 PCI_DMA_FROMDEVICE);
1570 prefetch(skb);
1571 prefetch(((char *)(skb)) + 128);
1572
1573 /* is this an error packet? */
34f80b04 1574 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1575 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1576 "ERROR flags %x rx packet %u\n",
1577 cqe_fp_flags, sw_comp_cons);
de832a55 1578 fp->eth_q_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1579 goto reuse_rx;
1580 }
1581
1582 /* Since we don't have a jumbo ring
1583 * copy small packets if mtu > 1500
1584 */
1585 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1586 (len <= RX_COPY_THRESH)) {
1587 struct sk_buff *new_skb;
1588
1589 new_skb = netdev_alloc_skb(bp->dev,
1590 len + pad);
1591 if (new_skb == NULL) {
1592 DP(NETIF_MSG_RX_ERR,
34f80b04 1593 "ERROR packet dropped "
a2fbb9ea 1594 "because of alloc failure\n");
de832a55 1595 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1596 goto reuse_rx;
1597 }
1598
1599 /* aligned copy */
1600 skb_copy_from_linear_data_offset(skb, pad,
1601 new_skb->data + pad, len);
1602 skb_reserve(new_skb, pad);
1603 skb_put(new_skb, len);
1604
1605 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1606
1607 skb = new_skb;
1608
1609 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1610 pci_unmap_single(bp->pdev,
1611 pci_unmap_addr(rx_buf, mapping),
437cf2f1 1612 bp->rx_buf_size,
a2fbb9ea
ET
1613 PCI_DMA_FROMDEVICE);
1614 skb_reserve(skb, pad);
1615 skb_put(skb, len);
1616
1617 } else {
1618 DP(NETIF_MSG_RX_ERR,
34f80b04 1619 "ERROR packet dropped because "
a2fbb9ea 1620 "of alloc failure\n");
de832a55 1621 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1622reuse_rx:
1623 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1624 goto next_rx;
1625 }
1626
1627 skb->protocol = eth_type_trans(skb, bp->dev);
1628
1629 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1630 if (bp->rx_csum) {
1adcd8be
EG
1631 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1632 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1633 else
de832a55 1634 fp->eth_q_stats.hw_csum_err++;
66e855f3 1635 }
a2fbb9ea
ET
1636 }
1637
748e5439 1638 skb_record_rx_queue(skb, fp->index);
a2fbb9ea 1639#ifdef BCM_VLAN
0c6671b0 1640 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1641 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1642 PARSING_FLAGS_VLAN))
a2fbb9ea
ET
1643 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1644 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1645 else
1646#endif
34f80b04 1647 netif_receive_skb(skb);
a2fbb9ea 1648
a2fbb9ea
ET
1649
1650next_rx:
1651 rx_buf->skb = NULL;
1652
1653 bd_cons = NEXT_RX_IDX(bd_cons);
1654 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1655 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1656 rx_pkt++;
a2fbb9ea
ET
1657next_cqe:
1658 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1659 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1660
34f80b04 1661 if (rx_pkt == budget)
a2fbb9ea
ET
1662 break;
1663 } /* while */
1664
1665 fp->rx_bd_cons = bd_cons;
34f80b04 1666 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1667 fp->rx_comp_cons = sw_comp_cons;
1668 fp->rx_comp_prod = sw_comp_prod;
1669
7a9b2557
VZ
1670 /* Update producers */
1671 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1672 fp->rx_sge_prod);
a2fbb9ea
ET
1673
1674 fp->rx_pkt += rx_pkt;
1675 fp->rx_calls++;
1676
1677 return rx_pkt;
1678}
1679
1680static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1681{
1682 struct bnx2x_fastpath *fp = fp_cookie;
1683 struct bnx2x *bp = fp->bp;
a2fbb9ea 1684
da5a662a
VZ
1685 /* Return here if interrupt is disabled */
1686 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1687 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1688 return IRQ_HANDLED;
1689 }
1690
34f80b04 1691 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1692 fp->index, fp->sb_id);
0626b899 1693 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1694
1695#ifdef BNX2X_STOP_ON_ERROR
1696 if (unlikely(bp->panic))
1697 return IRQ_HANDLED;
1698#endif
ca00392c
EG
1699 /* Handle Rx or Tx according to MSI-X vector */
1700 if (fp->is_rx_queue) {
1701 prefetch(fp->rx_cons_sb);
1702 prefetch(&fp->status_blk->u_status_block.status_block_index);
a2fbb9ea 1703
ca00392c 1704 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1705
ca00392c
EG
1706 } else {
1707 prefetch(fp->tx_cons_sb);
1708 prefetch(&fp->status_blk->c_status_block.status_block_index);
1709
1710 bnx2x_update_fpsb_idx(fp);
1711 rmb();
1712 bnx2x_tx_int(fp);
1713
1714 /* Re-enable interrupts */
1715 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1716 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1717 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1718 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1719 }
34f80b04 1720
a2fbb9ea
ET
1721 return IRQ_HANDLED;
1722}
1723
1724static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1725{
555f6c78 1726 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1727 u16 status = bnx2x_ack_int(bp);
34f80b04 1728 u16 mask;
ca00392c 1729 int i;
a2fbb9ea 1730
34f80b04 1731 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1732 if (unlikely(status == 0)) {
1733 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1734 return IRQ_NONE;
1735 }
f5372251 1736 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1737
34f80b04 1738 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1739 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1740 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1741 return IRQ_HANDLED;
1742 }
1743
3196a88a
EG
1744#ifdef BNX2X_STOP_ON_ERROR
1745 if (unlikely(bp->panic))
1746 return IRQ_HANDLED;
1747#endif
1748
ca00392c
EG
1749 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1750 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1751
ca00392c
EG
1752 mask = 0x2 << fp->sb_id;
1753 if (status & mask) {
1754 /* Handle Rx or Tx according to SB id */
1755 if (fp->is_rx_queue) {
1756 prefetch(fp->rx_cons_sb);
1757 prefetch(&fp->status_blk->u_status_block.
1758 status_block_index);
a2fbb9ea 1759
ca00392c 1760 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1761
ca00392c
EG
1762 } else {
1763 prefetch(fp->tx_cons_sb);
1764 prefetch(&fp->status_blk->c_status_block.
1765 status_block_index);
1766
1767 bnx2x_update_fpsb_idx(fp);
1768 rmb();
1769 bnx2x_tx_int(fp);
1770
1771 /* Re-enable interrupts */
1772 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1773 le16_to_cpu(fp->fp_u_idx),
1774 IGU_INT_NOP, 1);
1775 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1776 le16_to_cpu(fp->fp_c_idx),
1777 IGU_INT_ENABLE, 1);
1778 }
1779 status &= ~mask;
1780 }
a2fbb9ea
ET
1781 }
1782
a2fbb9ea 1783
34f80b04 1784 if (unlikely(status & 0x1)) {
1cf167f2 1785 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1786
1787 status &= ~0x1;
1788 if (!status)
1789 return IRQ_HANDLED;
1790 }
1791
34f80b04
EG
1792 if (status)
1793 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1794 status);
a2fbb9ea 1795
c18487ee 1796 return IRQ_HANDLED;
a2fbb9ea
ET
1797}
1798
c18487ee 1799/* end of fast path */
a2fbb9ea 1800
bb2a0f7a 1801static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1802
c18487ee
YR
1803/* Link */
1804
1805/*
1806 * General service functions
1807 */
a2fbb9ea 1808
4a37fb66 1809static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1810{
1811 u32 lock_status;
1812 u32 resource_bit = (1 << resource);
4a37fb66
YG
1813 int func = BP_FUNC(bp);
1814 u32 hw_lock_control_reg;
c18487ee 1815 int cnt;
a2fbb9ea 1816
c18487ee
YR
1817 /* Validating that the resource is within range */
1818 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1819 DP(NETIF_MSG_HW,
1820 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1821 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1822 return -EINVAL;
1823 }
a2fbb9ea 1824
4a37fb66
YG
1825 if (func <= 5) {
1826 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1827 } else {
1828 hw_lock_control_reg =
1829 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1830 }
1831
c18487ee 1832 /* Validating that the resource is not already taken */
4a37fb66 1833 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1834 if (lock_status & resource_bit) {
1835 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1836 lock_status, resource_bit);
1837 return -EEXIST;
1838 }
a2fbb9ea 1839
46230476
EG
1840 /* Try for 5 second every 5ms */
1841 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1842 /* Try to acquire the lock */
4a37fb66
YG
1843 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1844 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1845 if (lock_status & resource_bit)
1846 return 0;
a2fbb9ea 1847
c18487ee 1848 msleep(5);
a2fbb9ea 1849 }
c18487ee
YR
1850 DP(NETIF_MSG_HW, "Timeout\n");
1851 return -EAGAIN;
1852}
a2fbb9ea 1853
4a37fb66 1854static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1855{
1856 u32 lock_status;
1857 u32 resource_bit = (1 << resource);
4a37fb66
YG
1858 int func = BP_FUNC(bp);
1859 u32 hw_lock_control_reg;
a2fbb9ea 1860
c18487ee
YR
1861 /* Validating that the resource is within range */
1862 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1863 DP(NETIF_MSG_HW,
1864 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1865 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1866 return -EINVAL;
1867 }
1868
4a37fb66
YG
1869 if (func <= 5) {
1870 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1871 } else {
1872 hw_lock_control_reg =
1873 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1874 }
1875
c18487ee 1876 /* Validating that the resource is currently taken */
4a37fb66 1877 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1878 if (!(lock_status & resource_bit)) {
1879 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1880 lock_status, resource_bit);
1881 return -EFAULT;
a2fbb9ea
ET
1882 }
1883
4a37fb66 1884 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1885 return 0;
1886}
1887
1888/* HW Lock for shared dual port PHYs */
4a37fb66 1889static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1890{
34f80b04 1891 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1892
46c6a674
EG
1893 if (bp->port.need_hw_lock)
1894 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1895}
a2fbb9ea 1896
4a37fb66 1897static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1898{
46c6a674
EG
1899 if (bp->port.need_hw_lock)
1900 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1901
34f80b04 1902 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1903}
a2fbb9ea 1904
4acac6a5
EG
1905int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1906{
1907 /* The GPIO should be swapped if swap register is set and active */
1908 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1909 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1910 int gpio_shift = gpio_num +
1911 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1912 u32 gpio_mask = (1 << gpio_shift);
1913 u32 gpio_reg;
1914 int value;
1915
1916 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1917 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1918 return -EINVAL;
1919 }
1920
1921 /* read GPIO value */
1922 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1923
1924 /* get the requested pin value */
1925 if ((gpio_reg & gpio_mask) == gpio_mask)
1926 value = 1;
1927 else
1928 value = 0;
1929
1930 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1931
1932 return value;
1933}
1934
17de50b7 1935int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1936{
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1943 u32 gpio_reg;
a2fbb9ea 1944
c18487ee
YR
1945 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1946 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1947 return -EINVAL;
1948 }
a2fbb9ea 1949
4a37fb66 1950 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1951 /* read GPIO and mask except the float bits */
1952 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1953
c18487ee
YR
1954 switch (mode) {
1955 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1956 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1957 gpio_num, gpio_shift);
1958 /* clear FLOAT and set CLR */
1959 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1960 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1961 break;
a2fbb9ea 1962
c18487ee
YR
1963 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1964 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1965 gpio_num, gpio_shift);
1966 /* clear FLOAT and set SET */
1967 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1968 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1969 break;
a2fbb9ea 1970
17de50b7 1971 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1972 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1973 gpio_num, gpio_shift);
1974 /* set FLOAT */
1975 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1976 break;
a2fbb9ea 1977
c18487ee
YR
1978 default:
1979 break;
a2fbb9ea
ET
1980 }
1981
c18487ee 1982 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1983 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1984
c18487ee 1985 return 0;
a2fbb9ea
ET
1986}
1987
4acac6a5
EG
1988int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1989{
1990 /* The GPIO should be swapped if swap register is set and active */
1991 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993 int gpio_shift = gpio_num +
1994 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995 u32 gpio_mask = (1 << gpio_shift);
1996 u32 gpio_reg;
1997
1998 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2000 return -EINVAL;
2001 }
2002
2003 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2004 /* read GPIO int */
2005 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2006
2007 switch (mode) {
2008 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2009 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2010 "output low\n", gpio_num, gpio_shift);
2011 /* clear SET and set CLR */
2012 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2013 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2014 break;
2015
2016 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2017 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2018 "output high\n", gpio_num, gpio_shift);
2019 /* clear CLR and set SET */
2020 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2021 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2022 break;
2023
2024 default:
2025 break;
2026 }
2027
2028 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2029 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2030
2031 return 0;
2032}
2033
c18487ee 2034static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2035{
c18487ee
YR
2036 u32 spio_mask = (1 << spio_num);
2037 u32 spio_reg;
a2fbb9ea 2038
c18487ee
YR
2039 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2040 (spio_num > MISC_REGISTERS_SPIO_7)) {
2041 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2042 return -EINVAL;
a2fbb9ea
ET
2043 }
2044
4a37fb66 2045 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2046 /* read SPIO and mask except the float bits */
2047 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2048
c18487ee 2049 switch (mode) {
6378c025 2050 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2051 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2052 /* clear FLOAT and set CLR */
2053 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2054 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2055 break;
a2fbb9ea 2056
6378c025 2057 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2058 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2059 /* clear FLOAT and set SET */
2060 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2061 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2062 break;
a2fbb9ea 2063
c18487ee
YR
2064 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2065 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2066 /* set FLOAT */
2067 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2068 break;
a2fbb9ea 2069
c18487ee
YR
2070 default:
2071 break;
a2fbb9ea
ET
2072 }
2073
c18487ee 2074 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2075 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2076
a2fbb9ea
ET
2077 return 0;
2078}
2079
c18487ee 2080static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2081{
ad33ea3a
EG
2082 switch (bp->link_vars.ieee_fc &
2083 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2084 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2085 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2086 ADVERTISED_Pause);
2087 break;
356e2385 2088
c18487ee 2089 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2090 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2091 ADVERTISED_Pause);
2092 break;
356e2385 2093
c18487ee 2094 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2095 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2096 break;
356e2385 2097
c18487ee 2098 default:
34f80b04 2099 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2100 ADVERTISED_Pause);
2101 break;
2102 }
2103}
f1410647 2104
c18487ee
YR
2105static void bnx2x_link_report(struct bnx2x *bp)
2106{
2107 if (bp->link_vars.link_up) {
2108 if (bp->state == BNX2X_STATE_OPEN)
2109 netif_carrier_on(bp->dev);
2110 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2111
c18487ee 2112 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2113
c18487ee
YR
2114 if (bp->link_vars.duplex == DUPLEX_FULL)
2115 printk("full duplex");
2116 else
2117 printk("half duplex");
f1410647 2118
c0700f90
DM
2119 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2120 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2121 printk(", receive ");
356e2385
EG
2122 if (bp->link_vars.flow_ctrl &
2123 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2124 printk("& transmit ");
2125 } else {
2126 printk(", transmit ");
2127 }
2128 printk("flow control ON");
2129 }
2130 printk("\n");
f1410647 2131
c18487ee
YR
2132 } else { /* link_down */
2133 netif_carrier_off(bp->dev);
2134 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2135 }
c18487ee
YR
2136}
2137
b5bf9068 2138static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2139{
19680c48
EG
2140 if (!BP_NOMCP(bp)) {
2141 u8 rc;
a2fbb9ea 2142
19680c48 2143 /* Initialize link parameters structure variables */
8c99e7b0
YR
2144 /* It is recommended to turn off RX FC for jumbo frames
2145 for better performance */
2146 if (IS_E1HMF(bp))
c0700f90 2147 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2148 else if (bp->dev->mtu > 5000)
c0700f90 2149 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2150 else
c0700f90 2151 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2152
4a37fb66 2153 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2154
2155 if (load_mode == LOAD_DIAG)
2156 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2157
19680c48 2158 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2159
4a37fb66 2160 bnx2x_release_phy_lock(bp);
a2fbb9ea 2161
3c96c68b
EG
2162 bnx2x_calc_fc_adv(bp);
2163
b5bf9068
EG
2164 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2165 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2166 bnx2x_link_report(bp);
b5bf9068 2167 }
34f80b04 2168
19680c48
EG
2169 return rc;
2170 }
f5372251 2171 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2172 return -EINVAL;
a2fbb9ea
ET
2173}
2174
c18487ee 2175static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2176{
19680c48 2177 if (!BP_NOMCP(bp)) {
4a37fb66 2178 bnx2x_acquire_phy_lock(bp);
19680c48 2179 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2180 bnx2x_release_phy_lock(bp);
a2fbb9ea 2181
19680c48
EG
2182 bnx2x_calc_fc_adv(bp);
2183 } else
f5372251 2184 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2185}
a2fbb9ea 2186
c18487ee
YR
2187static void bnx2x__link_reset(struct bnx2x *bp)
2188{
19680c48 2189 if (!BP_NOMCP(bp)) {
4a37fb66 2190 bnx2x_acquire_phy_lock(bp);
589abe3a 2191 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2192 bnx2x_release_phy_lock(bp);
19680c48 2193 } else
f5372251 2194 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2195}
a2fbb9ea 2196
c18487ee
YR
2197static u8 bnx2x_link_test(struct bnx2x *bp)
2198{
2199 u8 rc;
a2fbb9ea 2200
4a37fb66 2201 bnx2x_acquire_phy_lock(bp);
c18487ee 2202 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2203 bnx2x_release_phy_lock(bp);
a2fbb9ea 2204
c18487ee
YR
2205 return rc;
2206}
a2fbb9ea 2207
8a1c38d1 2208static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2209{
8a1c38d1
EG
2210 u32 r_param = bp->link_vars.line_speed / 8;
2211 u32 fair_periodic_timeout_usec;
2212 u32 t_fair;
34f80b04 2213
8a1c38d1
EG
2214 memset(&(bp->cmng.rs_vars), 0,
2215 sizeof(struct rate_shaping_vars_per_port));
2216 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2217
8a1c38d1
EG
2218 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2219 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2220
8a1c38d1
EG
2221 /* this is the threshold below which no timer arming will occur
2222 1.25 coefficient is for the threshold to be a little bigger
2223 than the real time, to compensate for timer in-accuracy */
2224 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2225 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2226
8a1c38d1
EG
2227 /* resolution of fairness timer */
2228 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2229 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2230 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2231
8a1c38d1
EG
2232 /* this is the threshold below which we won't arm the timer anymore */
2233 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2234
8a1c38d1
EG
2235 /* we multiply by 1e3/8 to get bytes/msec.
2236 We don't want the credits to pass a credit
2237 of the t_fair*FAIR_MEM (algorithm resolution) */
2238 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2239 /* since each tick is 4 usec */
2240 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2241}
2242
8a1c38d1 2243static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2244{
2245 struct rate_shaping_vars_per_vn m_rs_vn;
2246 struct fairness_vars_per_vn m_fair_vn;
2247 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2248 u16 vn_min_rate, vn_max_rate;
2249 int i;
2250
2251 /* If function is hidden - set min and max to zeroes */
2252 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2253 vn_min_rate = 0;
2254 vn_max_rate = 0;
2255
2256 } else {
2257 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2258 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2259 /* If fairness is enabled (not all min rates are zeroes) and
34f80b04 2260 if current min rate is zero - set it to 1.
33471629 2261 This is a requirement of the algorithm. */
8a1c38d1 2262 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2263 vn_min_rate = DEF_MIN_RATE;
2264 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2265 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2266 }
2267
8a1c38d1
EG
2268 DP(NETIF_MSG_IFUP,
2269 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2270 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2271
2272 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2273 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2274
2275 /* global vn counter - maximal Mbps for this vn */
2276 m_rs_vn.vn_counter.rate = vn_max_rate;
2277
2278 /* quota - number of bytes transmitted in this period */
2279 m_rs_vn.vn_counter.quota =
2280 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2281
8a1c38d1 2282 if (bp->vn_weight_sum) {
34f80b04
EG
2283 /* credit for each period of the fairness algorithm:
2284 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
2285 vn_weight_sum should not be larger than 10000, thus
2286 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2287 than zero */
34f80b04 2288 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2289 max((u32)(vn_min_rate * (T_FAIR_COEF /
2290 (8 * bp->vn_weight_sum))),
2291 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2292 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2293 m_fair_vn.vn_credit_delta);
2294 }
2295
34f80b04
EG
2296 /* Store it to internal memory */
2297 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2298 REG_WR(bp, BAR_XSTRORM_INTMEM +
2299 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2300 ((u32 *)(&m_rs_vn))[i]);
2301
2302 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2303 REG_WR(bp, BAR_XSTRORM_INTMEM +
2304 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2305 ((u32 *)(&m_fair_vn))[i]);
2306}
2307
8a1c38d1 2308
c18487ee
YR
2309/* This function is called upon link interrupt */
2310static void bnx2x_link_attn(struct bnx2x *bp)
2311{
bb2a0f7a
YG
2312 /* Make sure that we are synced with the current statistics */
2313 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2314
c18487ee 2315 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2316
bb2a0f7a
YG
2317 if (bp->link_vars.link_up) {
2318
1c06328c
EG
2319 /* dropless flow control */
2320 if (CHIP_IS_E1H(bp)) {
2321 int port = BP_PORT(bp);
2322 u32 pause_enabled = 0;
2323
2324 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2325 pause_enabled = 1;
2326
2327 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2328 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2329 pause_enabled);
2330 }
2331
bb2a0f7a
YG
2332 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2333 struct host_port_stats *pstats;
2334
2335 pstats = bnx2x_sp(bp, port_stats);
2336 /* reset old bmac stats */
2337 memset(&(pstats->mac_stx[0]), 0,
2338 sizeof(struct mac_stx));
2339 }
2340 if ((bp->state == BNX2X_STATE_OPEN) ||
2341 (bp->state == BNX2X_STATE_DISABLED))
2342 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2343 }
2344
c18487ee
YR
2345 /* indicate link status */
2346 bnx2x_link_report(bp);
34f80b04
EG
2347
2348 if (IS_E1HMF(bp)) {
8a1c38d1 2349 int port = BP_PORT(bp);
34f80b04 2350 int func;
8a1c38d1 2351 int vn;
34f80b04
EG
2352
2353 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2354 if (vn == BP_E1HVN(bp))
2355 continue;
2356
8a1c38d1 2357 func = ((vn << 1) | port);
34f80b04
EG
2358
2359 /* Set the attention towards other drivers
2360 on the same port */
2361 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2362 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2363 }
34f80b04 2364
8a1c38d1
EG
2365 if (bp->link_vars.link_up) {
2366 int i;
2367
2368 /* Init rate shaping and fairness contexts */
2369 bnx2x_init_port_minmax(bp);
34f80b04 2370
34f80b04 2371 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2372 bnx2x_init_vn_minmax(bp, 2*vn + port);
2373
2374 /* Store it to internal memory */
2375 for (i = 0;
2376 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2377 REG_WR(bp, BAR_XSTRORM_INTMEM +
2378 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2379 ((u32 *)(&bp->cmng))[i]);
2380 }
34f80b04 2381 }
c18487ee 2382}
a2fbb9ea 2383
c18487ee
YR
2384static void bnx2x__link_status_update(struct bnx2x *bp)
2385{
2386 if (bp->state != BNX2X_STATE_OPEN)
2387 return;
a2fbb9ea 2388
c18487ee 2389 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2390
bb2a0f7a
YG
2391 if (bp->link_vars.link_up)
2392 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2393 else
2394 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2395
c18487ee
YR
2396 /* indicate link status */
2397 bnx2x_link_report(bp);
a2fbb9ea 2398}
a2fbb9ea 2399
34f80b04
EG
2400static void bnx2x_pmf_update(struct bnx2x *bp)
2401{
2402 int port = BP_PORT(bp);
2403 u32 val;
2404
2405 bp->port.pmf = 1;
2406 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2407
2408 /* enable nig attention */
2409 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2410 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2411 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2412
2413 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2414}
2415
c18487ee 2416/* end of Link */
a2fbb9ea
ET
2417
2418/* slow path */
2419
2420/*
2421 * General service functions
2422 */
2423
2424/* the slow path queue is odd since completions arrive on the fastpath ring */
2425static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2426 u32 data_hi, u32 data_lo, int common)
2427{
34f80b04 2428 int func = BP_FUNC(bp);
a2fbb9ea 2429
34f80b04
EG
2430 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2431 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2432 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2433 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2434 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2435
2436#ifdef BNX2X_STOP_ON_ERROR
2437 if (unlikely(bp->panic))
2438 return -EIO;
2439#endif
2440
34f80b04 2441 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2442
2443 if (!bp->spq_left) {
2444 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2445 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2446 bnx2x_panic();
2447 return -EBUSY;
2448 }
f1410647 2449
a2fbb9ea
ET
2450 /* CID needs port number to be encoded int it */
2451 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2452 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2453 HW_CID(bp, cid)));
2454 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2455 if (common)
2456 bp->spq_prod_bd->hdr.type |=
2457 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2458
2459 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2460 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2461
2462 bp->spq_left--;
2463
2464 if (bp->spq_prod_bd == bp->spq_last_bd) {
2465 bp->spq_prod_bd = bp->spq;
2466 bp->spq_prod_idx = 0;
2467 DP(NETIF_MSG_TIMER, "end of spq\n");
2468
2469 } else {
2470 bp->spq_prod_bd++;
2471 bp->spq_prod_idx++;
2472 }
2473
37dbbf32
EG
2474 /* Make sure that BD data is updated before writing the producer */
2475 wmb();
2476
34f80b04 2477 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2478 bp->spq_prod_idx);
2479
37dbbf32
EG
2480 mmiowb();
2481
34f80b04 2482 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2483 return 0;
2484}
2485
2486/* acquire split MCP access lock register */
4a37fb66 2487static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2488{
a2fbb9ea 2489 u32 i, j, val;
34f80b04 2490 int rc = 0;
a2fbb9ea
ET
2491
2492 might_sleep();
2493 i = 100;
2494 for (j = 0; j < i*10; j++) {
2495 val = (1UL << 31);
2496 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2497 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2498 if (val & (1L << 31))
2499 break;
2500
2501 msleep(5);
2502 }
a2fbb9ea 2503 if (!(val & (1L << 31))) {
19680c48 2504 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2505 rc = -EBUSY;
2506 }
2507
2508 return rc;
2509}
2510
4a37fb66
YG
2511/* release split MCP access lock register */
2512static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2513{
2514 u32 val = 0;
2515
2516 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2517}
2518
2519static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2520{
2521 struct host_def_status_block *def_sb = bp->def_status_blk;
2522 u16 rc = 0;
2523
2524 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2525 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2526 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2527 rc |= 1;
2528 }
2529 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2530 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2531 rc |= 2;
2532 }
2533 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2534 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2535 rc |= 4;
2536 }
2537 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2538 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2539 rc |= 8;
2540 }
2541 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2542 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2543 rc |= 16;
2544 }
2545 return rc;
2546}
2547
2548/*
2549 * slow path service functions
2550 */
2551
2552static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2553{
34f80b04 2554 int port = BP_PORT(bp);
5c862848
EG
2555 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2556 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2557 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2558 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2559 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2560 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2561 u32 aeu_mask;
87942b46 2562 u32 nig_mask = 0;
a2fbb9ea 2563
a2fbb9ea
ET
2564 if (bp->attn_state & asserted)
2565 BNX2X_ERR("IGU ERROR\n");
2566
3fcaf2e5
EG
2567 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2568 aeu_mask = REG_RD(bp, aeu_addr);
2569
a2fbb9ea 2570 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2571 aeu_mask, asserted);
2572 aeu_mask &= ~(asserted & 0xff);
2573 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2574
3fcaf2e5
EG
2575 REG_WR(bp, aeu_addr, aeu_mask);
2576 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2577
3fcaf2e5 2578 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2579 bp->attn_state |= asserted;
3fcaf2e5 2580 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2581
2582 if (asserted & ATTN_HARD_WIRED_MASK) {
2583 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2584
a5e9a7cf
EG
2585 bnx2x_acquire_phy_lock(bp);
2586
877e9aa4 2587 /* save nig interrupt mask */
87942b46 2588 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2589 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2590
c18487ee 2591 bnx2x_link_attn(bp);
a2fbb9ea
ET
2592
2593 /* handle unicore attn? */
2594 }
2595 if (asserted & ATTN_SW_TIMER_4_FUNC)
2596 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2597
2598 if (asserted & GPIO_2_FUNC)
2599 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2600
2601 if (asserted & GPIO_3_FUNC)
2602 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2603
2604 if (asserted & GPIO_4_FUNC)
2605 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2606
2607 if (port == 0) {
2608 if (asserted & ATTN_GENERAL_ATTN_1) {
2609 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2610 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2611 }
2612 if (asserted & ATTN_GENERAL_ATTN_2) {
2613 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2614 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2615 }
2616 if (asserted & ATTN_GENERAL_ATTN_3) {
2617 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2618 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2619 }
2620 } else {
2621 if (asserted & ATTN_GENERAL_ATTN_4) {
2622 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2623 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2624 }
2625 if (asserted & ATTN_GENERAL_ATTN_5) {
2626 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2627 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2628 }
2629 if (asserted & ATTN_GENERAL_ATTN_6) {
2630 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2631 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2632 }
2633 }
2634
2635 } /* if hardwired */
2636
5c862848
EG
2637 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2638 asserted, hc_addr);
2639 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2640
2641 /* now set back the mask */
a5e9a7cf 2642 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2643 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2644 bnx2x_release_phy_lock(bp);
2645 }
a2fbb9ea
ET
2646}
2647
fd4ef40d
EG
2648static inline void bnx2x_fan_failure(struct bnx2x *bp)
2649{
2650 int port = BP_PORT(bp);
2651
2652 /* mark the failure */
2653 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2654 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2655 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2656 bp->link_params.ext_phy_config);
2657
2658 /* log the failure */
2659 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2660 " the driver to shutdown the card to prevent permanent"
2661 " damage. Please contact Dell Support for assistance\n",
2662 bp->dev->name);
2663}
877e9aa4 2664static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2665{
34f80b04 2666 int port = BP_PORT(bp);
877e9aa4 2667 int reg_offset;
4d295db0 2668 u32 val, swap_val, swap_override;
877e9aa4 2669
34f80b04
EG
2670 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2671 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2672
34f80b04 2673 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2674
2675 val = REG_RD(bp, reg_offset);
2676 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2677 REG_WR(bp, reg_offset, val);
2678
2679 BNX2X_ERR("SPIO5 hw attention\n");
2680
fd4ef40d 2681 /* Fan failure attention */
35b19ba5
EG
2682 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2683 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2684 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2685 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2686 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2687 /* The PHY reset is controlled by GPIO 1 */
2688 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2689 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2690 break;
2691
4d295db0
EG
2692 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2693 /* The PHY reset is controlled by GPIO 1 */
2694 /* fake the port number to cancel the swap done in
2695 set_gpio() */
2696 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2697 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2698 port = (swap_val && swap_override) ^ 1;
2699 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2700 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2701 break;
2702
877e9aa4
ET
2703 default:
2704 break;
2705 }
fd4ef40d 2706 bnx2x_fan_failure(bp);
877e9aa4 2707 }
34f80b04 2708
589abe3a
EG
2709 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2710 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2711 bnx2x_acquire_phy_lock(bp);
2712 bnx2x_handle_module_detect_int(&bp->link_params);
2713 bnx2x_release_phy_lock(bp);
2714 }
2715
34f80b04
EG
2716 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2717
2718 val = REG_RD(bp, reg_offset);
2719 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2720 REG_WR(bp, reg_offset, val);
2721
2722 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2723 (attn & HW_INTERRUT_ASSERT_SET_0));
2724 bnx2x_panic();
2725 }
877e9aa4
ET
2726}
2727
2728static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2729{
2730 u32 val;
2731
0626b899 2732 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2733
2734 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2735 BNX2X_ERR("DB hw attention 0x%x\n", val);
2736 /* DORQ discard attention */
2737 if (val & 0x2)
2738 BNX2X_ERR("FATAL error from DORQ\n");
2739 }
34f80b04
EG
2740
2741 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2742
2743 int port = BP_PORT(bp);
2744 int reg_offset;
2745
2746 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2747 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2748
2749 val = REG_RD(bp, reg_offset);
2750 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2751 REG_WR(bp, reg_offset, val);
2752
2753 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2754 (attn & HW_INTERRUT_ASSERT_SET_1));
2755 bnx2x_panic();
2756 }
877e9aa4
ET
2757}
2758
2759static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2760{
2761 u32 val;
2762
2763 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2764
2765 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2766 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2767 /* CFC error attention */
2768 if (val & 0x2)
2769 BNX2X_ERR("FATAL error from CFC\n");
2770 }
2771
2772 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2773
2774 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2775 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2776 /* RQ_USDMDP_FIFO_OVERFLOW */
2777 if (val & 0x18000)
2778 BNX2X_ERR("FATAL error from PXP\n");
2779 }
34f80b04
EG
2780
2781 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2782
2783 int port = BP_PORT(bp);
2784 int reg_offset;
2785
2786 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2787 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2788
2789 val = REG_RD(bp, reg_offset);
2790 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2791 REG_WR(bp, reg_offset, val);
2792
2793 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2794 (attn & HW_INTERRUT_ASSERT_SET_2));
2795 bnx2x_panic();
2796 }
877e9aa4
ET
2797}
2798
2799static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2800{
34f80b04
EG
2801 u32 val;
2802
877e9aa4
ET
2803 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2804
34f80b04
EG
2805 if (attn & BNX2X_PMF_LINK_ASSERT) {
2806 int func = BP_FUNC(bp);
2807
2808 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2809 bnx2x__link_status_update(bp);
2810 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2811 DRV_STATUS_PMF)
2812 bnx2x_pmf_update(bp);
2813
2814 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2815
2816 BNX2X_ERR("MC assert!\n");
2817 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2818 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2819 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2820 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2821 bnx2x_panic();
2822
2823 } else if (attn & BNX2X_MCP_ASSERT) {
2824
2825 BNX2X_ERR("MCP assert!\n");
2826 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2827 bnx2x_fw_dump(bp);
877e9aa4
ET
2828
2829 } else
2830 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2831 }
2832
2833 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2834 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2835 if (attn & BNX2X_GRC_TIMEOUT) {
2836 val = CHIP_IS_E1H(bp) ?
2837 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2838 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2839 }
2840 if (attn & BNX2X_GRC_RSV) {
2841 val = CHIP_IS_E1H(bp) ?
2842 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2843 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2844 }
877e9aa4 2845 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2846 }
2847}
2848
2849static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2850{
a2fbb9ea
ET
2851 struct attn_route attn;
2852 struct attn_route group_mask;
34f80b04 2853 int port = BP_PORT(bp);
877e9aa4 2854 int index;
a2fbb9ea
ET
2855 u32 reg_addr;
2856 u32 val;
3fcaf2e5 2857 u32 aeu_mask;
a2fbb9ea
ET
2858
2859 /* need to take HW lock because MCP or other port might also
2860 try to handle this event */
4a37fb66 2861 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2862
2863 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2864 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2865 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2866 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2867 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2868 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2869
2870 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2871 if (deasserted & (1 << index)) {
2872 group_mask = bp->attn_group[index];
2873
34f80b04
EG
2874 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2875 index, group_mask.sig[0], group_mask.sig[1],
2876 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2877
877e9aa4
ET
2878 bnx2x_attn_int_deasserted3(bp,
2879 attn.sig[3] & group_mask.sig[3]);
2880 bnx2x_attn_int_deasserted1(bp,
2881 attn.sig[1] & group_mask.sig[1]);
2882 bnx2x_attn_int_deasserted2(bp,
2883 attn.sig[2] & group_mask.sig[2]);
2884 bnx2x_attn_int_deasserted0(bp,
2885 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2886
a2fbb9ea
ET
2887 if ((attn.sig[0] & group_mask.sig[0] &
2888 HW_PRTY_ASSERT_SET_0) ||
2889 (attn.sig[1] & group_mask.sig[1] &
2890 HW_PRTY_ASSERT_SET_1) ||
2891 (attn.sig[2] & group_mask.sig[2] &
2892 HW_PRTY_ASSERT_SET_2))
6378c025 2893 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2894 }
2895 }
2896
4a37fb66 2897 bnx2x_release_alr(bp);
a2fbb9ea 2898
5c862848 2899 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2900
2901 val = ~deasserted;
3fcaf2e5
EG
2902 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2903 val, reg_addr);
5c862848 2904 REG_WR(bp, reg_addr, val);
a2fbb9ea 2905
a2fbb9ea 2906 if (~bp->attn_state & deasserted)
3fcaf2e5 2907 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2908
2909 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2910 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2911
3fcaf2e5
EG
2912 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2913 aeu_mask = REG_RD(bp, reg_addr);
2914
2915 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2916 aeu_mask, deasserted);
2917 aeu_mask |= (deasserted & 0xff);
2918 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2919
3fcaf2e5
EG
2920 REG_WR(bp, reg_addr, aeu_mask);
2921 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2922
2923 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2924 bp->attn_state &= ~deasserted;
2925 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2926}
2927
2928static void bnx2x_attn_int(struct bnx2x *bp)
2929{
2930 /* read local copy of bits */
68d59484
EG
2931 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2932 attn_bits);
2933 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2934 attn_bits_ack);
a2fbb9ea
ET
2935 u32 attn_state = bp->attn_state;
2936
2937 /* look for changed bits */
2938 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2939 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2940
2941 DP(NETIF_MSG_HW,
2942 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2943 attn_bits, attn_ack, asserted, deasserted);
2944
2945 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2946 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2947
2948 /* handle bits that were raised */
2949 if (asserted)
2950 bnx2x_attn_int_asserted(bp, asserted);
2951
2952 if (deasserted)
2953 bnx2x_attn_int_deasserted(bp, deasserted);
2954}
2955
2956static void bnx2x_sp_task(struct work_struct *work)
2957{
1cf167f2 2958 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2959 u16 status;
2960
34f80b04 2961
a2fbb9ea
ET
2962 /* Return here if interrupt is disabled */
2963 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2964 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2965 return;
2966 }
2967
2968 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2969/* if (status == 0) */
2970/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2971
3196a88a 2972 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2973
877e9aa4
ET
2974 /* HW attentions */
2975 if (status & 0x1)
a2fbb9ea 2976 bnx2x_attn_int(bp);
a2fbb9ea 2977
68d59484 2978 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2979 IGU_INT_NOP, 1);
2980 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2981 IGU_INT_NOP, 1);
2982 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2983 IGU_INT_NOP, 1);
2984 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2985 IGU_INT_NOP, 1);
2986 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2987 IGU_INT_ENABLE, 1);
877e9aa4 2988
a2fbb9ea
ET
2989}
2990
2991static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2992{
2993 struct net_device *dev = dev_instance;
2994 struct bnx2x *bp = netdev_priv(dev);
2995
2996 /* Return here if interrupt is disabled */
2997 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2998 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2999 return IRQ_HANDLED;
3000 }
3001
8d9c5f34 3002 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3003
3004#ifdef BNX2X_STOP_ON_ERROR
3005 if (unlikely(bp->panic))
3006 return IRQ_HANDLED;
3007#endif
3008
1cf167f2 3009 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3010
3011 return IRQ_HANDLED;
3012}
3013
3014/* end of slow path */
3015
3016/* Statistics */
3017
3018/****************************************************************************
3019* Macros
3020****************************************************************************/
3021
a2fbb9ea
ET
3022/* sum[hi:lo] += add[hi:lo] */
3023#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3024 do { \
3025 s_lo += a_lo; \
f5ba6772 3026 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3027 } while (0)
3028
3029/* difference = minuend - subtrahend */
3030#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3031 do { \
bb2a0f7a
YG
3032 if (m_lo < s_lo) { \
3033 /* underflow */ \
a2fbb9ea 3034 d_hi = m_hi - s_hi; \
bb2a0f7a 3035 if (d_hi > 0) { \
6378c025 3036 /* we can 'loan' 1 */ \
a2fbb9ea
ET
3037 d_hi--; \
3038 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3039 } else { \
6378c025 3040 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3041 d_hi = 0; \
3042 d_lo = 0; \
3043 } \
bb2a0f7a
YG
3044 } else { \
3045 /* m_lo >= s_lo */ \
a2fbb9ea 3046 if (m_hi < s_hi) { \
bb2a0f7a
YG
3047 d_hi = 0; \
3048 d_lo = 0; \
3049 } else { \
6378c025 3050 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3051 d_hi = m_hi - s_hi; \
3052 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3053 } \
3054 } \
3055 } while (0)
3056
bb2a0f7a 3057#define UPDATE_STAT64(s, t) \
a2fbb9ea 3058 do { \
bb2a0f7a
YG
3059 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3060 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3061 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3062 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3063 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3064 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3065 } while (0)
3066
bb2a0f7a 3067#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3068 do { \
bb2a0f7a
YG
3069 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3070 diff.lo, new->s##_lo, old->s##_lo); \
3071 ADD_64(estats->t##_hi, diff.hi, \
3072 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3073 } while (0)
3074
3075/* sum[hi:lo] += add */
3076#define ADD_EXTEND_64(s_hi, s_lo, a) \
3077 do { \
3078 s_lo += a; \
3079 s_hi += (s_lo < a) ? 1 : 0; \
3080 } while (0)
3081
bb2a0f7a 3082#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3083 do { \
bb2a0f7a
YG
3084 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3085 pstats->mac_stx[1].s##_lo, \
3086 new->s); \
a2fbb9ea
ET
3087 } while (0)
3088
bb2a0f7a 3089#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3090 do { \
4781bfad
EG
3091 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3092 old_tclient->s = tclient->s; \
de832a55
EG
3093 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3094 } while (0)
3095
3096#define UPDATE_EXTEND_USTAT(s, t) \
3097 do { \
3098 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3099 old_uclient->s = uclient->s; \
3100 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3101 } while (0)
3102
3103#define UPDATE_EXTEND_XSTAT(s, t) \
3104 do { \
4781bfad
EG
3105 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3106 old_xclient->s = xclient->s; \
de832a55
EG
3107 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3108 } while (0)
3109
3110/* minuend -= subtrahend */
3111#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3112 do { \
3113 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3114 } while (0)
3115
3116/* minuend[hi:lo] -= subtrahend */
3117#define SUB_EXTEND_64(m_hi, m_lo, s) \
3118 do { \
3119 SUB_64(m_hi, 0, m_lo, s); \
3120 } while (0)
3121
3122#define SUB_EXTEND_USTAT(s, t) \
3123 do { \
3124 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3125 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3126 } while (0)
3127
3128/*
3129 * General service functions
3130 */
3131
3132static inline long bnx2x_hilo(u32 *hiref)
3133{
3134 u32 lo = *(hiref + 1);
3135#if (BITS_PER_LONG == 64)
3136 u32 hi = *hiref;
3137
3138 return HILO_U64(hi, lo);
3139#else
3140 return lo;
3141#endif
3142}
3143
3144/*
3145 * Init service functions
3146 */
3147
bb2a0f7a
YG
3148static void bnx2x_storm_stats_post(struct bnx2x *bp)
3149{
3150 if (!bp->stats_pending) {
3151 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3152 int i, rc;
bb2a0f7a
YG
3153
3154 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3155 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3156 for_each_queue(bp, i)
3157 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3158
3159 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3160 ((u32 *)&ramrod_data)[1],
3161 ((u32 *)&ramrod_data)[0], 0);
3162 if (rc == 0) {
3163 /* stats ramrod has it's own slot on the spq */
3164 bp->spq_left++;
3165 bp->stats_pending = 1;
3166 }
3167 }
3168}
3169
3170static void bnx2x_stats_init(struct bnx2x *bp)
3171{
3172 int port = BP_PORT(bp);
de832a55 3173 int i;
bb2a0f7a 3174
de832a55 3175 bp->stats_pending = 0;
bb2a0f7a
YG
3176 bp->executer_idx = 0;
3177 bp->stats_counter = 0;
3178
3179 /* port stats */
3180 if (!BP_NOMCP(bp))
3181 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3182 else
3183 bp->port.port_stx = 0;
3184 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3185
3186 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3187 bp->port.old_nig_stats.brb_discard =
3188 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3189 bp->port.old_nig_stats.brb_truncate =
3190 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3191 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3192 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3193 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3194 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3195
3196 /* function stats */
de832a55
EG
3197 for_each_queue(bp, i) {
3198 struct bnx2x_fastpath *fp = &bp->fp[i];
3199
3200 memset(&fp->old_tclient, 0,
3201 sizeof(struct tstorm_per_client_stats));
3202 memset(&fp->old_uclient, 0,
3203 sizeof(struct ustorm_per_client_stats));
3204 memset(&fp->old_xclient, 0,
3205 sizeof(struct xstorm_per_client_stats));
3206 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3207 }
3208
bb2a0f7a 3209 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
bb2a0f7a
YG
3210 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3211
3212 bp->stats_state = STATS_STATE_DISABLED;
3213 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3214 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3215}
3216
3217static void bnx2x_hw_stats_post(struct bnx2x *bp)
3218{
3219 struct dmae_command *dmae = &bp->stats_dmae;
3220 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3221
3222 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3223 if (CHIP_REV_IS_SLOW(bp))
3224 return;
bb2a0f7a
YG
3225
3226 /* loader */
3227 if (bp->executer_idx) {
3228 int loader_idx = PMF_DMAE_C(bp);
3229
3230 memset(dmae, 0, sizeof(struct dmae_command));
3231
3232 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3233 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3234 DMAE_CMD_DST_RESET |
3235#ifdef __BIG_ENDIAN
3236 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3237#else
3238 DMAE_CMD_ENDIANITY_DW_SWAP |
3239#endif
3240 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3241 DMAE_CMD_PORT_0) |
3242 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3243 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3244 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3245 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3246 sizeof(struct dmae_command) *
3247 (loader_idx + 1)) >> 2;
3248 dmae->dst_addr_hi = 0;
3249 dmae->len = sizeof(struct dmae_command) >> 2;
3250 if (CHIP_IS_E1(bp))
3251 dmae->len--;
3252 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3253 dmae->comp_addr_hi = 0;
3254 dmae->comp_val = 1;
3255
3256 *stats_comp = 0;
3257 bnx2x_post_dmae(bp, dmae, loader_idx);
3258
3259 } else if (bp->func_stx) {
3260 *stats_comp = 0;
3261 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3262 }
3263}
3264
3265static int bnx2x_stats_comp(struct bnx2x *bp)
3266{
3267 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3268 int cnt = 10;
3269
3270 might_sleep();
3271 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3272 if (!cnt) {
3273 BNX2X_ERR("timeout waiting for stats finished\n");
3274 break;
3275 }
3276 cnt--;
12469401 3277 msleep(1);
bb2a0f7a
YG
3278 }
3279 return 1;
3280}
3281
3282/*
3283 * Statistics service functions
3284 */
3285
3286static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3287{
3288 struct dmae_command *dmae;
3289 u32 opcode;
3290 int loader_idx = PMF_DMAE_C(bp);
3291 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3292
3293 /* sanity */
3294 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3295 BNX2X_ERR("BUG!\n");
3296 return;
3297 }
3298
3299 bp->executer_idx = 0;
3300
3301 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3302 DMAE_CMD_C_ENABLE |
3303 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3304#ifdef __BIG_ENDIAN
3305 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3306#else
3307 DMAE_CMD_ENDIANITY_DW_SWAP |
3308#endif
3309 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3310 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3311
3312 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3313 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3314 dmae->src_addr_lo = bp->port.port_stx >> 2;
3315 dmae->src_addr_hi = 0;
3316 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3317 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3318 dmae->len = DMAE_LEN32_RD_MAX;
3319 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3320 dmae->comp_addr_hi = 0;
3321 dmae->comp_val = 1;
3322
3323 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3324 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3325 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3326 dmae->src_addr_hi = 0;
7a9b2557
VZ
3327 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3328 DMAE_LEN32_RD_MAX * 4);
3329 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3330 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3331 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3332 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3333 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3334 dmae->comp_val = DMAE_COMP_VAL;
3335
3336 *stats_comp = 0;
3337 bnx2x_hw_stats_post(bp);
3338 bnx2x_stats_comp(bp);
3339}
3340
3341static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3342{
3343 struct dmae_command *dmae;
34f80b04 3344 int port = BP_PORT(bp);
bb2a0f7a 3345 int vn = BP_E1HVN(bp);
a2fbb9ea 3346 u32 opcode;
bb2a0f7a 3347 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3348 u32 mac_addr;
bb2a0f7a
YG
3349 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3350
3351 /* sanity */
3352 if (!bp->link_vars.link_up || !bp->port.pmf) {
3353 BNX2X_ERR("BUG!\n");
3354 return;
3355 }
a2fbb9ea
ET
3356
3357 bp->executer_idx = 0;
bb2a0f7a
YG
3358
3359 /* MCP */
3360 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3361 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3362 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3363#ifdef __BIG_ENDIAN
bb2a0f7a 3364 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3365#else
bb2a0f7a 3366 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3367#endif
bb2a0f7a
YG
3368 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3369 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3370
bb2a0f7a 3371 if (bp->port.port_stx) {
a2fbb9ea
ET
3372
3373 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3374 dmae->opcode = opcode;
bb2a0f7a
YG
3375 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3376 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3377 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3378 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3379 dmae->len = sizeof(struct host_port_stats) >> 2;
3380 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3381 dmae->comp_addr_hi = 0;
3382 dmae->comp_val = 1;
a2fbb9ea
ET
3383 }
3384
bb2a0f7a
YG
3385 if (bp->func_stx) {
3386
3387 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3388 dmae->opcode = opcode;
3389 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3390 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3391 dmae->dst_addr_lo = bp->func_stx >> 2;
3392 dmae->dst_addr_hi = 0;
3393 dmae->len = sizeof(struct host_func_stats) >> 2;
3394 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3395 dmae->comp_addr_hi = 0;
3396 dmae->comp_val = 1;
a2fbb9ea
ET
3397 }
3398
bb2a0f7a 3399 /* MAC */
a2fbb9ea
ET
3400 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3401 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3402 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3403#ifdef __BIG_ENDIAN
3404 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3405#else
3406 DMAE_CMD_ENDIANITY_DW_SWAP |
3407#endif
bb2a0f7a
YG
3408 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3409 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3410
c18487ee 3411 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3412
3413 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3414 NIG_REG_INGRESS_BMAC0_MEM);
3415
3416 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3417 BIGMAC_REGISTER_TX_STAT_GTBYT */
3418 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3419 dmae->opcode = opcode;
3420 dmae->src_addr_lo = (mac_addr +
3421 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3422 dmae->src_addr_hi = 0;
3423 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3424 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3425 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3426 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3427 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3428 dmae->comp_addr_hi = 0;
3429 dmae->comp_val = 1;
3430
3431 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3432 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3433 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3434 dmae->opcode = opcode;
3435 dmae->src_addr_lo = (mac_addr +
3436 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3437 dmae->src_addr_hi = 0;
3438 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3439 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3440 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3441 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3442 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3443 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3444 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3445 dmae->comp_addr_hi = 0;
3446 dmae->comp_val = 1;
3447
c18487ee 3448 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3449
3450 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3451
3452 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3453 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3454 dmae->opcode = opcode;
3455 dmae->src_addr_lo = (mac_addr +
3456 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3457 dmae->src_addr_hi = 0;
3458 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3459 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3460 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3461 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3462 dmae->comp_addr_hi = 0;
3463 dmae->comp_val = 1;
3464
3465 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3466 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3467 dmae->opcode = opcode;
3468 dmae->src_addr_lo = (mac_addr +
3469 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3470 dmae->src_addr_hi = 0;
3471 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3472 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3473 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3474 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3475 dmae->len = 1;
3476 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3477 dmae->comp_addr_hi = 0;
3478 dmae->comp_val = 1;
3479
3480 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3481 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3482 dmae->opcode = opcode;
3483 dmae->src_addr_lo = (mac_addr +
3484 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3485 dmae->src_addr_hi = 0;
3486 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3487 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3488 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3489 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3490 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3491 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3492 dmae->comp_addr_hi = 0;
3493 dmae->comp_val = 1;
3494 }
3495
3496 /* NIG */
bb2a0f7a
YG
3497 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3498 dmae->opcode = opcode;
3499 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3500 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3501 dmae->src_addr_hi = 0;
3502 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3503 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3504 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3505 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3506 dmae->comp_addr_hi = 0;
3507 dmae->comp_val = 1;
3508
3509 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3510 dmae->opcode = opcode;
3511 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3512 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3513 dmae->src_addr_hi = 0;
3514 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3515 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3516 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3517 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3518 dmae->len = (2*sizeof(u32)) >> 2;
3519 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3520 dmae->comp_addr_hi = 0;
3521 dmae->comp_val = 1;
3522
a2fbb9ea
ET
3523 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3524 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3525 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3526 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3527#ifdef __BIG_ENDIAN
3528 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3529#else
3530 DMAE_CMD_ENDIANITY_DW_SWAP |
3531#endif
bb2a0f7a
YG
3532 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3533 (vn << DMAE_CMD_E1HVN_SHIFT));
3534 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3535 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3536 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3537 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3538 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3539 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3540 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3541 dmae->len = (2*sizeof(u32)) >> 2;
3542 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3543 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3544 dmae->comp_val = DMAE_COMP_VAL;
3545
3546 *stats_comp = 0;
a2fbb9ea
ET
3547}
3548
bb2a0f7a 3549static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3550{
bb2a0f7a
YG
3551 struct dmae_command *dmae = &bp->stats_dmae;
3552 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3553
bb2a0f7a
YG
3554 /* sanity */
3555 if (!bp->func_stx) {
3556 BNX2X_ERR("BUG!\n");
3557 return;
3558 }
a2fbb9ea 3559
bb2a0f7a
YG
3560 bp->executer_idx = 0;
3561 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3562
bb2a0f7a
YG
3563 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3564 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3565 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3566#ifdef __BIG_ENDIAN
3567 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3568#else
3569 DMAE_CMD_ENDIANITY_DW_SWAP |
3570#endif
3571 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3572 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3573 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3574 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3575 dmae->dst_addr_lo = bp->func_stx >> 2;
3576 dmae->dst_addr_hi = 0;
3577 dmae->len = sizeof(struct host_func_stats) >> 2;
3578 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3579 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3580 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3581
bb2a0f7a
YG
3582 *stats_comp = 0;
3583}
a2fbb9ea 3584
bb2a0f7a
YG
3585static void bnx2x_stats_start(struct bnx2x *bp)
3586{
3587 if (bp->port.pmf)
3588 bnx2x_port_stats_init(bp);
3589
3590 else if (bp->func_stx)
3591 bnx2x_func_stats_init(bp);
3592
3593 bnx2x_hw_stats_post(bp);
3594 bnx2x_storm_stats_post(bp);
3595}
3596
3597static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3598{
3599 bnx2x_stats_comp(bp);
3600 bnx2x_stats_pmf_update(bp);
3601 bnx2x_stats_start(bp);
3602}
3603
3604static void bnx2x_stats_restart(struct bnx2x *bp)
3605{
3606 bnx2x_stats_comp(bp);
3607 bnx2x_stats_start(bp);
3608}
3609
3610static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3611{
3612 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3613 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3614 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3615 struct {
3616 u32 lo;
3617 u32 hi;
3618 } diff;
bb2a0f7a
YG
3619
3620 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3621 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3622 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3623 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3624 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3625 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3626 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3627 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3628 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
bb2a0f7a
YG
3629 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3630 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3631 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3632 UPDATE_STAT64(tx_stat_gt127,
3633 tx_stat_etherstatspkts65octetsto127octets);
3634 UPDATE_STAT64(tx_stat_gt255,
3635 tx_stat_etherstatspkts128octetsto255octets);
3636 UPDATE_STAT64(tx_stat_gt511,
3637 tx_stat_etherstatspkts256octetsto511octets);
3638 UPDATE_STAT64(tx_stat_gt1023,
3639 tx_stat_etherstatspkts512octetsto1023octets);
3640 UPDATE_STAT64(tx_stat_gt1518,
3641 tx_stat_etherstatspkts1024octetsto1522octets);
3642 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3643 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3644 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3645 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3646 UPDATE_STAT64(tx_stat_gterr,
3647 tx_stat_dot3statsinternalmactransmiterrors);
3648 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
de832a55
EG
3649
3650 estats->pause_frames_received_hi =
3651 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3652 estats->pause_frames_received_lo =
3653 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3654
3655 estats->pause_frames_sent_hi =
3656 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3657 estats->pause_frames_sent_lo =
3658 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
bb2a0f7a
YG
3659}
3660
3661static void bnx2x_emac_stats_update(struct bnx2x *bp)
3662{
3663 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3664 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3665 struct bnx2x_eth_stats *estats = &bp->eth_stats;
bb2a0f7a
YG
3666
3667 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3668 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3669 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3670 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3671 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3672 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3673 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3674 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3675 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3676 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3677 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3678 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3679 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3680 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3681 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3682 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3683 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3684 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3685 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3686 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3687 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3688 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3689 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3690 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3691 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3692 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3693 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3694 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3695 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3696 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3697 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
de832a55
EG
3698
3699 estats->pause_frames_received_hi =
3700 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3701 estats->pause_frames_received_lo =
3702 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3703 ADD_64(estats->pause_frames_received_hi,
3704 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3705 estats->pause_frames_received_lo,
3706 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3707
3708 estats->pause_frames_sent_hi =
3709 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3710 estats->pause_frames_sent_lo =
3711 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3712 ADD_64(estats->pause_frames_sent_hi,
3713 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3714 estats->pause_frames_sent_lo,
3715 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
bb2a0f7a
YG
3716}
3717
3718static int bnx2x_hw_stats_update(struct bnx2x *bp)
3719{
3720 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3721 struct nig_stats *old = &(bp->port.old_nig_stats);
3722 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3723 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3724 struct {
3725 u32 lo;
3726 u32 hi;
3727 } diff;
de832a55 3728 u32 nig_timer_max;
bb2a0f7a
YG
3729
3730 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3731 bnx2x_bmac_stats_update(bp);
3732
3733 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3734 bnx2x_emac_stats_update(bp);
3735
3736 else { /* unreached */
c3eefaf6 3737 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
bb2a0f7a
YG
3738 return -1;
3739 }
a2fbb9ea 3740
bb2a0f7a
YG
3741 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3742 new->brb_discard - old->brb_discard);
66e855f3
YG
3743 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3744 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3745
bb2a0f7a
YG
3746 UPDATE_STAT64_NIG(egress_mac_pkt0,
3747 etherstatspkts1024octetsto1522octets);
3748 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3749
bb2a0f7a 3750 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3751
bb2a0f7a
YG
3752 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3753 sizeof(struct mac_stx));
3754 estats->brb_drop_hi = pstats->brb_drop_hi;
3755 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3756
bb2a0f7a 3757 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3758
de832a55
EG
3759 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3760 if (nig_timer_max != estats->nig_timer_max) {
3761 estats->nig_timer_max = nig_timer_max;
3762 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3763 }
3764
bb2a0f7a 3765 return 0;
a2fbb9ea
ET
3766}
3767
bb2a0f7a 3768static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3769{
3770 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3771 struct tstorm_per_port_stats *tport =
de832a55 3772 &stats->tstorm_common.port_statistics;
bb2a0f7a
YG
3773 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3774 struct bnx2x_eth_stats *estats = &bp->eth_stats;
de832a55
EG
3775 int i;
3776
3777 memset(&(fstats->total_bytes_received_hi), 0,
3778 sizeof(struct host_func_stats) - 2*sizeof(u32));
3779 estats->error_bytes_received_hi = 0;
3780 estats->error_bytes_received_lo = 0;
3781 estats->etherstatsoverrsizepkts_hi = 0;
3782 estats->etherstatsoverrsizepkts_lo = 0;
3783 estats->no_buff_discard_hi = 0;
3784 estats->no_buff_discard_lo = 0;
a2fbb9ea 3785
ca00392c 3786 for_each_rx_queue(bp, i) {
de832a55
EG
3787 struct bnx2x_fastpath *fp = &bp->fp[i];
3788 int cl_id = fp->cl_id;
3789 struct tstorm_per_client_stats *tclient =
3790 &stats->tstorm_common.client_statistics[cl_id];
3791 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3792 struct ustorm_per_client_stats *uclient =
3793 &stats->ustorm_common.client_statistics[cl_id];
3794 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3795 struct xstorm_per_client_stats *xclient =
3796 &stats->xstorm_common.client_statistics[cl_id];
3797 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3798 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3799 u32 diff;
3800
3801 /* are storm stats valid? */
3802 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 3803 bp->stats_counter) {
de832a55
EG
3804 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3805 " xstorm counter (%d) != stats_counter (%d)\n",
3806 i, xclient->stats_counter, bp->stats_counter);
3807 return -1;
3808 }
3809 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 3810 bp->stats_counter) {
de832a55
EG
3811 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3812 " tstorm counter (%d) != stats_counter (%d)\n",
3813 i, tclient->stats_counter, bp->stats_counter);
3814 return -2;
3815 }
3816 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3817 bp->stats_counter) {
3818 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3819 " ustorm counter (%d) != stats_counter (%d)\n",
3820 i, uclient->stats_counter, bp->stats_counter);
3821 return -4;
3822 }
a2fbb9ea 3823
de832a55 3824 qstats->total_bytes_received_hi =
ca00392c 3825 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 3826 qstats->total_bytes_received_lo =
ca00392c
EG
3827 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3828
3829 ADD_64(qstats->total_bytes_received_hi,
3830 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
3831 qstats->total_bytes_received_lo,
3832 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
3833
3834 ADD_64(qstats->total_bytes_received_hi,
3835 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
3836 qstats->total_bytes_received_lo,
3837 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
3838
3839 qstats->valid_bytes_received_hi =
3840 qstats->total_bytes_received_hi;
de832a55 3841 qstats->valid_bytes_received_lo =
ca00392c 3842 qstats->total_bytes_received_lo;
bb2a0f7a 3843
de832a55 3844 qstats->error_bytes_received_hi =
bb2a0f7a 3845 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 3846 qstats->error_bytes_received_lo =
bb2a0f7a 3847 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 3848
de832a55
EG
3849 ADD_64(qstats->total_bytes_received_hi,
3850 qstats->error_bytes_received_hi,
3851 qstats->total_bytes_received_lo,
3852 qstats->error_bytes_received_lo);
3853
3854 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3855 total_unicast_packets_received);
3856 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3857 total_multicast_packets_received);
3858 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3859 total_broadcast_packets_received);
3860 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3861 etherstatsoverrsizepkts);
3862 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3863
3864 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3865 total_unicast_packets_received);
3866 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3867 total_multicast_packets_received);
3868 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3869 total_broadcast_packets_received);
3870 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3871 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3872 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3873
3874 qstats->total_bytes_transmitted_hi =
ca00392c 3875 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 3876 qstats->total_bytes_transmitted_lo =
ca00392c
EG
3877 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3878
3879 ADD_64(qstats->total_bytes_transmitted_hi,
3880 le32_to_cpu(xclient->multicast_bytes_sent.hi),
3881 qstats->total_bytes_transmitted_lo,
3882 le32_to_cpu(xclient->multicast_bytes_sent.lo));
3883
3884 ADD_64(qstats->total_bytes_transmitted_hi,
3885 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
3886 qstats->total_bytes_transmitted_lo,
3887 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 3888
de832a55
EG
3889 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3890 total_unicast_packets_transmitted);
3891 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3892 total_multicast_packets_transmitted);
3893 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3894 total_broadcast_packets_transmitted);
3895
3896 old_tclient->checksum_discard = tclient->checksum_discard;
3897 old_tclient->ttl0_discard = tclient->ttl0_discard;
3898
3899 ADD_64(fstats->total_bytes_received_hi,
3900 qstats->total_bytes_received_hi,
3901 fstats->total_bytes_received_lo,
3902 qstats->total_bytes_received_lo);
3903 ADD_64(fstats->total_bytes_transmitted_hi,
3904 qstats->total_bytes_transmitted_hi,
3905 fstats->total_bytes_transmitted_lo,
3906 qstats->total_bytes_transmitted_lo);
3907 ADD_64(fstats->total_unicast_packets_received_hi,
3908 qstats->total_unicast_packets_received_hi,
3909 fstats->total_unicast_packets_received_lo,
3910 qstats->total_unicast_packets_received_lo);
3911 ADD_64(fstats->total_multicast_packets_received_hi,
3912 qstats->total_multicast_packets_received_hi,
3913 fstats->total_multicast_packets_received_lo,
3914 qstats->total_multicast_packets_received_lo);
3915 ADD_64(fstats->total_broadcast_packets_received_hi,
3916 qstats->total_broadcast_packets_received_hi,
3917 fstats->total_broadcast_packets_received_lo,
3918 qstats->total_broadcast_packets_received_lo);
3919 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3920 qstats->total_unicast_packets_transmitted_hi,
3921 fstats->total_unicast_packets_transmitted_lo,
3922 qstats->total_unicast_packets_transmitted_lo);
3923 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3924 qstats->total_multicast_packets_transmitted_hi,
3925 fstats->total_multicast_packets_transmitted_lo,
3926 qstats->total_multicast_packets_transmitted_lo);
3927 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3928 qstats->total_broadcast_packets_transmitted_hi,
3929 fstats->total_broadcast_packets_transmitted_lo,
3930 qstats->total_broadcast_packets_transmitted_lo);
3931 ADD_64(fstats->valid_bytes_received_hi,
3932 qstats->valid_bytes_received_hi,
3933 fstats->valid_bytes_received_lo,
3934 qstats->valid_bytes_received_lo);
3935
3936 ADD_64(estats->error_bytes_received_hi,
3937 qstats->error_bytes_received_hi,
3938 estats->error_bytes_received_lo,
3939 qstats->error_bytes_received_lo);
3940 ADD_64(estats->etherstatsoverrsizepkts_hi,
3941 qstats->etherstatsoverrsizepkts_hi,
3942 estats->etherstatsoverrsizepkts_lo,
3943 qstats->etherstatsoverrsizepkts_lo);
3944 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3945 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3946 }
3947
3948 ADD_64(fstats->total_bytes_received_hi,
3949 estats->rx_stat_ifhcinbadoctets_hi,
3950 fstats->total_bytes_received_lo,
3951 estats->rx_stat_ifhcinbadoctets_lo);
bb2a0f7a
YG
3952
3953 memcpy(estats, &(fstats->total_bytes_received_hi),
3954 sizeof(struct host_func_stats) - 2*sizeof(u32));
3955
de832a55
EG
3956 ADD_64(estats->etherstatsoverrsizepkts_hi,
3957 estats->rx_stat_dot3statsframestoolong_hi,
3958 estats->etherstatsoverrsizepkts_lo,
3959 estats->rx_stat_dot3statsframestoolong_lo);
3960 ADD_64(estats->error_bytes_received_hi,
3961 estats->rx_stat_ifhcinbadoctets_hi,
3962 estats->error_bytes_received_lo,
3963 estats->rx_stat_ifhcinbadoctets_lo);
3964
3965 if (bp->port.pmf) {
3966 estats->mac_filter_discard =
3967 le32_to_cpu(tport->mac_filter_discard);
3968 estats->xxoverflow_discard =
3969 le32_to_cpu(tport->xxoverflow_discard);
3970 estats->brb_truncate_discard =
bb2a0f7a 3971 le32_to_cpu(tport->brb_truncate_discard);
de832a55
EG
3972 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3973 }
bb2a0f7a
YG
3974
3975 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 3976
de832a55
EG
3977 bp->stats_pending = 0;
3978
a2fbb9ea
ET
3979 return 0;
3980}
3981
bb2a0f7a 3982static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3983{
bb2a0f7a 3984 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3985 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 3986 int i;
a2fbb9ea
ET
3987
3988 nstats->rx_packets =
3989 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3990 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3991 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3992
3993 nstats->tx_packets =
3994 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3995 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3996 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3997
de832a55 3998 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 3999
0e39e645 4000 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4001
de832a55 4002 nstats->rx_dropped = estats->mac_discard;
ca00392c 4003 for_each_rx_queue(bp, i)
de832a55
EG
4004 nstats->rx_dropped +=
4005 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4006
a2fbb9ea
ET
4007 nstats->tx_dropped = 0;
4008
4009 nstats->multicast =
de832a55 4010 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4011
bb2a0f7a 4012 nstats->collisions =
de832a55 4013 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
bb2a0f7a
YG
4014
4015 nstats->rx_length_errors =
de832a55
EG
4016 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4017 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4018 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4019 bnx2x_hilo(&estats->brb_truncate_hi);
4020 nstats->rx_crc_errors =
4021 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4022 nstats->rx_frame_errors =
4023 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4024 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
a2fbb9ea
ET
4025 nstats->rx_missed_errors = estats->xxoverflow_discard;
4026
4027 nstats->rx_errors = nstats->rx_length_errors +
4028 nstats->rx_over_errors +
4029 nstats->rx_crc_errors +
4030 nstats->rx_frame_errors +
0e39e645
ET
4031 nstats->rx_fifo_errors +
4032 nstats->rx_missed_errors;
a2fbb9ea 4033
bb2a0f7a 4034 nstats->tx_aborted_errors =
de832a55
EG
4035 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4036 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4037 nstats->tx_carrier_errors =
4038 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
a2fbb9ea
ET
4039 nstats->tx_fifo_errors = 0;
4040 nstats->tx_heartbeat_errors = 0;
4041 nstats->tx_window_errors = 0;
4042
4043 nstats->tx_errors = nstats->tx_aborted_errors +
de832a55
EG
4044 nstats->tx_carrier_errors +
4045 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4046}
4047
4048static void bnx2x_drv_stats_update(struct bnx2x *bp)
4049{
4050 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4051 int i;
4052
4053 estats->driver_xoff = 0;
4054 estats->rx_err_discard_pkt = 0;
4055 estats->rx_skb_alloc_failed = 0;
4056 estats->hw_csum_err = 0;
ca00392c 4057 for_each_rx_queue(bp, i) {
de832a55
EG
4058 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4059
4060 estats->driver_xoff += qstats->driver_xoff;
4061 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4062 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4063 estats->hw_csum_err += qstats->hw_csum_err;
4064 }
a2fbb9ea
ET
4065}
4066
bb2a0f7a 4067static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4068{
bb2a0f7a 4069 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4070
bb2a0f7a
YG
4071 if (*stats_comp != DMAE_COMP_VAL)
4072 return;
4073
4074 if (bp->port.pmf)
de832a55 4075 bnx2x_hw_stats_update(bp);
a2fbb9ea 4076
de832a55
EG
4077 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4078 BNX2X_ERR("storm stats were not updated for 3 times\n");
4079 bnx2x_panic();
4080 return;
a2fbb9ea
ET
4081 }
4082
de832a55
EG
4083 bnx2x_net_stats_update(bp);
4084 bnx2x_drv_stats_update(bp);
4085
a2fbb9ea 4086 if (bp->msglevel & NETIF_MSG_TIMER) {
ca00392c
EG
4087 struct bnx2x_fastpath *fp0_rx = bp->fp;
4088 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
de832a55
EG
4089 struct tstorm_per_client_stats *old_tclient =
4090 &bp->fp->old_tclient;
4091 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4092 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4093 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4094 int i;
a2fbb9ea
ET
4095
4096 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4097 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4098 " tx pkt (%lx)\n",
ca00392c
EG
4099 bnx2x_tx_avail(fp0_tx),
4100 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
4101 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4102 " rx pkt (%lx)\n",
ca00392c
EG
4103 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4104 fp0_rx->rx_comp_cons),
4105 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
de832a55
EG
4106 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4107 "brb truncate %u\n",
4108 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4109 qstats->driver_xoff,
4110 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4111 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4112 "packets_too_big_discard %lu no_buff_discard %lu "
a2fbb9ea
ET
4113 "mac_discard %u mac_filter_discard %u "
4114 "xxovrflow_discard %u brb_truncate_discard %u "
4115 "ttl0_discard %u\n",
4781bfad 4116 le32_to_cpu(old_tclient->checksum_discard),
de832a55
EG
4117 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4118 bnx2x_hilo(&qstats->no_buff_discard_hi),
4119 estats->mac_discard, estats->mac_filter_discard,
4120 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4121 le32_to_cpu(old_tclient->ttl0_discard));
a2fbb9ea
ET
4122
4123 for_each_queue(bp, i) {
4124 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4125 bnx2x_fp(bp, i, tx_pkt),
4126 bnx2x_fp(bp, i, rx_pkt),
4127 bnx2x_fp(bp, i, rx_calls));
4128 }
4129 }
4130
bb2a0f7a
YG
4131 bnx2x_hw_stats_post(bp);
4132 bnx2x_storm_stats_post(bp);
4133}
a2fbb9ea 4134
bb2a0f7a
YG
4135static void bnx2x_port_stats_stop(struct bnx2x *bp)
4136{
4137 struct dmae_command *dmae;
4138 u32 opcode;
4139 int loader_idx = PMF_DMAE_C(bp);
4140 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4141
bb2a0f7a 4142 bp->executer_idx = 0;
a2fbb9ea 4143
bb2a0f7a
YG
4144 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4145 DMAE_CMD_C_ENABLE |
4146 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4147#ifdef __BIG_ENDIAN
bb2a0f7a 4148 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4149#else
bb2a0f7a 4150 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4151#endif
bb2a0f7a
YG
4152 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4153 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4154
4155 if (bp->port.port_stx) {
4156
4157 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4158 if (bp->func_stx)
4159 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4160 else
4161 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4162 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4163 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4164 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4165 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
4166 dmae->len = sizeof(struct host_port_stats) >> 2;
4167 if (bp->func_stx) {
4168 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4169 dmae->comp_addr_hi = 0;
4170 dmae->comp_val = 1;
4171 } else {
4172 dmae->comp_addr_lo =
4173 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4174 dmae->comp_addr_hi =
4175 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4176 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4177
bb2a0f7a
YG
4178 *stats_comp = 0;
4179 }
a2fbb9ea
ET
4180 }
4181
bb2a0f7a
YG
4182 if (bp->func_stx) {
4183
4184 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4185 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4186 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4187 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4188 dmae->dst_addr_lo = bp->func_stx >> 2;
4189 dmae->dst_addr_hi = 0;
4190 dmae->len = sizeof(struct host_func_stats) >> 2;
4191 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4192 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4193 dmae->comp_val = DMAE_COMP_VAL;
4194
4195 *stats_comp = 0;
a2fbb9ea 4196 }
bb2a0f7a
YG
4197}
4198
4199static void bnx2x_stats_stop(struct bnx2x *bp)
4200{
4201 int update = 0;
4202
4203 bnx2x_stats_comp(bp);
4204
4205 if (bp->port.pmf)
4206 update = (bnx2x_hw_stats_update(bp) == 0);
4207
4208 update |= (bnx2x_storm_stats_update(bp) == 0);
4209
4210 if (update) {
4211 bnx2x_net_stats_update(bp);
a2fbb9ea 4212
bb2a0f7a
YG
4213 if (bp->port.pmf)
4214 bnx2x_port_stats_stop(bp);
4215
4216 bnx2x_hw_stats_post(bp);
4217 bnx2x_stats_comp(bp);
a2fbb9ea
ET
4218 }
4219}
4220
bb2a0f7a
YG
4221static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4222{
4223}
4224
4225static const struct {
4226 void (*action)(struct bnx2x *bp);
4227 enum bnx2x_stats_state next_state;
4228} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4229/* state event */
4230{
4231/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4232/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4233/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4234/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4235},
4236{
4237/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4238/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4239/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4240/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4241}
4242};
4243
4244static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4245{
4246 enum bnx2x_stats_state state = bp->stats_state;
4247
4248 bnx2x_stats_stm[state][event].action(bp);
4249 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4250
4251 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4252 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4253 state, event, bp->stats_state);
4254}
4255
a2fbb9ea
ET
4256static void bnx2x_timer(unsigned long data)
4257{
4258 struct bnx2x *bp = (struct bnx2x *) data;
4259
4260 if (!netif_running(bp->dev))
4261 return;
4262
4263 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4264 goto timer_restart;
a2fbb9ea
ET
4265
4266 if (poll) {
4267 struct bnx2x_fastpath *fp = &bp->fp[0];
4268 int rc;
4269
7961f791 4270 bnx2x_tx_int(fp);
a2fbb9ea
ET
4271 rc = bnx2x_rx_int(fp, 1000);
4272 }
4273
34f80b04
EG
4274 if (!BP_NOMCP(bp)) {
4275 int func = BP_FUNC(bp);
a2fbb9ea
ET
4276 u32 drv_pulse;
4277 u32 mcp_pulse;
4278
4279 ++bp->fw_drv_pulse_wr_seq;
4280 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4281 /* TBD - add SYSTEM_TIME */
4282 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4283 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4284
34f80b04 4285 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
4286 MCP_PULSE_SEQ_MASK);
4287 /* The delta between driver pulse and mcp response
4288 * should be 1 (before mcp response) or 0 (after mcp response)
4289 */
4290 if ((drv_pulse != mcp_pulse) &&
4291 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4292 /* someone lost a heartbeat... */
4293 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4294 drv_pulse, mcp_pulse);
4295 }
4296 }
4297
bb2a0f7a
YG
4298 if ((bp->state == BNX2X_STATE_OPEN) ||
4299 (bp->state == BNX2X_STATE_DISABLED))
4300 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4301
f1410647 4302timer_restart:
a2fbb9ea
ET
4303 mod_timer(&bp->timer, jiffies + bp->current_interval);
4304}
4305
4306/* end of Statistics */
4307
4308/* nic init */
4309
4310/*
4311 * nic init service functions
4312 */
4313
34f80b04 4314static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4315{
34f80b04
EG
4316 int port = BP_PORT(bp);
4317
ca00392c
EG
4318 /* "CSTORM" */
4319 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4320 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4321 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4322 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4323 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4324 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
34f80b04
EG
4325}
4326
5c862848
EG
4327static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4328 dma_addr_t mapping, int sb_id)
34f80b04
EG
4329{
4330 int port = BP_PORT(bp);
bb2a0f7a 4331 int func = BP_FUNC(bp);
a2fbb9ea 4332 int index;
34f80b04 4333 u64 section;
a2fbb9ea
ET
4334
4335 /* USTORM */
4336 section = ((u64)mapping) + offsetof(struct host_status_block,
4337 u_status_block);
34f80b04 4338 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 4339
ca00392c
EG
4340 REG_WR(bp, BAR_CSTRORM_INTMEM +
4341 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4342 REG_WR(bp, BAR_CSTRORM_INTMEM +
4343 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4344 U64_HI(section));
ca00392c
EG
4345 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4346 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4347
4348 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
ca00392c
EG
4349 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4350 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4351
4352 /* CSTORM */
4353 section = ((u64)mapping) + offsetof(struct host_status_block,
4354 c_status_block);
34f80b04 4355 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4356
4357 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4358 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4359 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4360 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4361 U64_HI(section));
7a9b2557 4362 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 4363 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4364
4365 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4366 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4367 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
34f80b04
EG
4368
4369 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4370}
4371
4372static void bnx2x_zero_def_sb(struct bnx2x *bp)
4373{
4374 int func = BP_FUNC(bp);
a2fbb9ea 4375
ca00392c 4376 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
490c3c9b
EG
4377 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4378 sizeof(struct tstorm_def_status_block)/4);
ca00392c
EG
4379 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4380 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4381 sizeof(struct cstorm_def_status_block_u)/4);
4382 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4383 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4384 sizeof(struct cstorm_def_status_block_c)/4);
4385 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
34f80b04
EG
4386 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4387 sizeof(struct xstorm_def_status_block)/4);
a2fbb9ea
ET
4388}
4389
4390static void bnx2x_init_def_sb(struct bnx2x *bp,
4391 struct host_def_status_block *def_sb,
34f80b04 4392 dma_addr_t mapping, int sb_id)
a2fbb9ea 4393{
34f80b04
EG
4394 int port = BP_PORT(bp);
4395 int func = BP_FUNC(bp);
a2fbb9ea
ET
4396 int index, val, reg_offset;
4397 u64 section;
4398
4399 /* ATTN */
4400 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4401 atten_status_block);
34f80b04 4402 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4403
49d66772
ET
4404 bp->attn_state = 0;
4405
a2fbb9ea
ET
4406 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4407 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4408
34f80b04 4409 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4410 bp->attn_group[index].sig[0] = REG_RD(bp,
4411 reg_offset + 0x10*index);
4412 bp->attn_group[index].sig[1] = REG_RD(bp,
4413 reg_offset + 0x4 + 0x10*index);
4414 bp->attn_group[index].sig[2] = REG_RD(bp,
4415 reg_offset + 0x8 + 0x10*index);
4416 bp->attn_group[index].sig[3] = REG_RD(bp,
4417 reg_offset + 0xc + 0x10*index);
4418 }
4419
a2fbb9ea
ET
4420 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4421 HC_REG_ATTN_MSG0_ADDR_L);
4422
4423 REG_WR(bp, reg_offset, U64_LO(section));
4424 REG_WR(bp, reg_offset + 4, U64_HI(section));
4425
4426 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4427
4428 val = REG_RD(bp, reg_offset);
34f80b04 4429 val |= sb_id;
a2fbb9ea
ET
4430 REG_WR(bp, reg_offset, val);
4431
4432 /* USTORM */
4433 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4434 u_def_status_block);
34f80b04 4435 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4436
ca00392c
EG
4437 REG_WR(bp, BAR_CSTRORM_INTMEM +
4438 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4439 REG_WR(bp, BAR_CSTRORM_INTMEM +
4440 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 4441 U64_HI(section));
ca00392c
EG
4442 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4443 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
a2fbb9ea
ET
4444
4445 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
ca00392c
EG
4446 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4447 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
a2fbb9ea
ET
4448
4449 /* CSTORM */
4450 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4451 c_def_status_block);
34f80b04 4452 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4453
4454 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4455 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 4456 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4457 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 4458 U64_HI(section));
5c862848 4459 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 4460 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
a2fbb9ea
ET
4461
4462 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4463 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4464 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
a2fbb9ea
ET
4465
4466 /* TSTORM */
4467 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4468 t_def_status_block);
34f80b04 4469 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4470
4471 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4472 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4473 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4474 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4475 U64_HI(section));
5c862848 4476 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4477 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4478
4479 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4480 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4481 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4482
4483 /* XSTORM */
4484 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4485 x_def_status_block);
34f80b04 4486 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4487
4488 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4489 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4490 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4491 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4492 U64_HI(section));
5c862848 4493 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4494 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4495
4496 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4497 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4498 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4499
bb2a0f7a 4500 bp->stats_pending = 0;
66e855f3 4501 bp->set_mac_pending = 0;
bb2a0f7a 4502
34f80b04 4503 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4504}
4505
4506static void bnx2x_update_coalesce(struct bnx2x *bp)
4507{
34f80b04 4508 int port = BP_PORT(bp);
a2fbb9ea
ET
4509 int i;
4510
4511 for_each_queue(bp, i) {
34f80b04 4512 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4513
4514 /* HC_INDEX_U_ETH_RX_CQ_CONS */
ca00392c
EG
4515 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4516 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4517 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4518 bp->rx_ticks/12);
ca00392c
EG
4519 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4520 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4521 U_SB_ETH_RX_CQ_INDEX),
3799cf47 4522 (bp->rx_ticks/12) ? 0 : 1);
a2fbb9ea
ET
4523
4524 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4525 REG_WR8(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
4526 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4527 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4528 bp->tx_ticks/12);
a2fbb9ea 4529 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
4530 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4531 C_SB_ETH_TX_CQ_INDEX),
3799cf47 4532 (bp->tx_ticks/12) ? 0 : 1);
a2fbb9ea
ET
4533 }
4534}
4535
7a9b2557
VZ
4536static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4537 struct bnx2x_fastpath *fp, int last)
4538{
4539 int i;
4540
4541 for (i = 0; i < last; i++) {
4542 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4543 struct sk_buff *skb = rx_buf->skb;
4544
4545 if (skb == NULL) {
4546 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4547 continue;
4548 }
4549
4550 if (fp->tpa_state[i] == BNX2X_TPA_START)
4551 pci_unmap_single(bp->pdev,
4552 pci_unmap_addr(rx_buf, mapping),
356e2385 4553 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
4554
4555 dev_kfree_skb(skb);
4556 rx_buf->skb = NULL;
4557 }
4558}
4559
a2fbb9ea
ET
4560static void bnx2x_init_rx_rings(struct bnx2x *bp)
4561{
7a9b2557 4562 int func = BP_FUNC(bp);
32626230
EG
4563 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4564 ETH_MAX_AGGREGATION_QUEUES_E1H;
4565 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4566 int i, j;
a2fbb9ea 4567
87942b46 4568 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4569 DP(NETIF_MSG_IFUP,
4570 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4571
7a9b2557 4572 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4573
555f6c78 4574 for_each_rx_queue(bp, j) {
32626230 4575 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4576
32626230 4577 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4578 fp->tpa_pool[i].skb =
4579 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4580 if (!fp->tpa_pool[i].skb) {
4581 BNX2X_ERR("Failed to allocate TPA "
4582 "skb pool for queue[%d] - "
4583 "disabling TPA on this "
4584 "queue!\n", j);
4585 bnx2x_free_tpa_pool(bp, fp, i);
4586 fp->disable_tpa = 1;
4587 break;
4588 }
4589 pci_unmap_addr_set((struct sw_rx_bd *)
4590 &bp->fp->tpa_pool[i],
4591 mapping, 0);
4592 fp->tpa_state[i] = BNX2X_TPA_STOP;
4593 }
4594 }
4595 }
4596
555f6c78 4597 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
4598 struct bnx2x_fastpath *fp = &bp->fp[j];
4599
4600 fp->rx_bd_cons = 0;
4601 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4602 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4603
ca00392c
EG
4604 /* Mark queue as Rx */
4605 fp->is_rx_queue = 1;
4606
7a9b2557
VZ
4607 /* "next page" elements initialization */
4608 /* SGE ring */
4609 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4610 struct eth_rx_sge *sge;
4611
4612 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4613 sge->addr_hi =
4614 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4615 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4616 sge->addr_lo =
4617 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4618 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4619 }
4620
4621 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4622
7a9b2557 4623 /* RX BD ring */
a2fbb9ea
ET
4624 for (i = 1; i <= NUM_RX_RINGS; i++) {
4625 struct eth_rx_bd *rx_bd;
4626
4627 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4628 rx_bd->addr_hi =
4629 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4630 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4631 rx_bd->addr_lo =
4632 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4633 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4634 }
4635
34f80b04 4636 /* CQ ring */
a2fbb9ea
ET
4637 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4638 struct eth_rx_cqe_next_page *nextpg;
4639
4640 nextpg = (struct eth_rx_cqe_next_page *)
4641 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4642 nextpg->addr_hi =
4643 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4644 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4645 nextpg->addr_lo =
4646 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4647 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4648 }
4649
7a9b2557
VZ
4650 /* Allocate SGEs and initialize the ring elements */
4651 for (i = 0, ring_prod = 0;
4652 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4653
7a9b2557
VZ
4654 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4655 BNX2X_ERR("was only able to allocate "
4656 "%d rx sges\n", i);
4657 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4658 /* Cleanup already allocated elements */
4659 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4660 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4661 fp->disable_tpa = 1;
4662 ring_prod = 0;
4663 break;
4664 }
4665 ring_prod = NEXT_SGE_IDX(ring_prod);
4666 }
4667 fp->rx_sge_prod = ring_prod;
4668
4669 /* Allocate BDs and initialize BD ring */
66e855f3 4670 fp->rx_comp_cons = 0;
7a9b2557 4671 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4672 for (i = 0; i < bp->rx_ring_size; i++) {
4673 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4674 BNX2X_ERR("was only able to allocate "
de832a55
EG
4675 "%d rx skbs on queue[%d]\n", i, j);
4676 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
4677 break;
4678 }
4679 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4680 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4681 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
4682 }
4683
7a9b2557
VZ
4684 fp->rx_bd_prod = ring_prod;
4685 /* must not have more available CQEs than BDs */
4686 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4687 cqe_ring_prod);
a2fbb9ea
ET
4688 fp->rx_pkt = fp->rx_calls = 0;
4689
7a9b2557
VZ
4690 /* Warning!
4691 * this will generate an interrupt (to the TSTORM)
4692 * must only be done after chip is initialized
4693 */
4694 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4695 fp->rx_sge_prod);
a2fbb9ea
ET
4696 if (j != 0)
4697 continue;
4698
4699 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4700 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
4701 U64_LO(fp->rx_comp_mapping));
4702 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4703 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
4704 U64_HI(fp->rx_comp_mapping));
4705 }
4706}
4707
4708static void bnx2x_init_tx_ring(struct bnx2x *bp)
4709{
4710 int i, j;
4711
555f6c78 4712 for_each_tx_queue(bp, j) {
a2fbb9ea
ET
4713 struct bnx2x_fastpath *fp = &bp->fp[j];
4714
4715 for (i = 1; i <= NUM_TX_RINGS; i++) {
ca00392c
EG
4716 struct eth_tx_next_bd *tx_next_bd =
4717 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 4718
ca00392c 4719 tx_next_bd->addr_hi =
a2fbb9ea 4720 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4721 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 4722 tx_next_bd->addr_lo =
a2fbb9ea 4723 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4724 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4725 }
4726
ca00392c
EG
4727 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
4728 fp->tx_db.data.zero_fill1 = 0;
4729 fp->tx_db.data.prod = 0;
4730
a2fbb9ea
ET
4731 fp->tx_pkt_prod = 0;
4732 fp->tx_pkt_cons = 0;
4733 fp->tx_bd_prod = 0;
4734 fp->tx_bd_cons = 0;
4735 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4736 fp->tx_pkt = 0;
4737 }
4738}
4739
4740static void bnx2x_init_sp_ring(struct bnx2x *bp)
4741{
34f80b04 4742 int func = BP_FUNC(bp);
a2fbb9ea
ET
4743
4744 spin_lock_init(&bp->spq_lock);
4745
4746 bp->spq_left = MAX_SPQ_PENDING;
4747 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4748 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4749 bp->spq_prod_bd = bp->spq;
4750 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4751
34f80b04 4752 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4753 U64_LO(bp->spq_mapping));
34f80b04
EG
4754 REG_WR(bp,
4755 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4756 U64_HI(bp->spq_mapping));
4757
34f80b04 4758 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4759 bp->spq_prod_idx);
4760}
4761
4762static void bnx2x_init_context(struct bnx2x *bp)
4763{
4764 int i;
4765
ca00392c 4766 for_each_rx_queue(bp, i) {
a2fbb9ea
ET
4767 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4768 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 4769 u8 cl_id = fp->cl_id;
a2fbb9ea 4770
34f80b04
EG
4771 context->ustorm_st_context.common.sb_index_numbers =
4772 BNX2X_RX_SB_INDEX_NUM;
0626b899 4773 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 4774 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 4775 context->ustorm_st_context.common.flags =
de832a55
EG
4776 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4777 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4778 context->ustorm_st_context.common.statistics_counter_id =
4779 cl_id;
8d9c5f34 4780 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 4781 BNX2X_RX_ALIGN_SHIFT;
34f80b04 4782 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4783 bp->rx_buf_size;
34f80b04 4784 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4785 U64_HI(fp->rx_desc_mapping);
34f80b04 4786 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4787 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4788 if (!fp->disable_tpa) {
4789 context->ustorm_st_context.common.flags |=
ca00392c 4790 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 4791 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
4792 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4793 (u32)0xffff);
7a9b2557
VZ
4794 context->ustorm_st_context.common.sge_page_base_hi =
4795 U64_HI(fp->rx_sge_mapping);
4796 context->ustorm_st_context.common.sge_page_base_lo =
4797 U64_LO(fp->rx_sge_mapping);
ca00392c
EG
4798
4799 context->ustorm_st_context.common.max_sges_for_packet =
4800 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
4801 context->ustorm_st_context.common.max_sges_for_packet =
4802 ((context->ustorm_st_context.common.
4803 max_sges_for_packet + PAGES_PER_SGE - 1) &
4804 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
7a9b2557
VZ
4805 }
4806
8d9c5f34
EG
4807 context->ustorm_ag_context.cdu_usage =
4808 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4809 CDU_REGION_NUMBER_UCM_AG,
4810 ETH_CONNECTION_TYPE);
4811
ca00392c
EG
4812 context->xstorm_ag_context.cdu_reserved =
4813 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4814 CDU_REGION_NUMBER_XCM_AG,
4815 ETH_CONNECTION_TYPE);
4816 }
4817
4818 for_each_tx_queue(bp, i) {
4819 struct bnx2x_fastpath *fp = &bp->fp[i];
4820 struct eth_context *context =
4821 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
4822
4823 context->cstorm_st_context.sb_index_number =
4824 C_SB_ETH_TX_CQ_INDEX;
4825 context->cstorm_st_context.status_block_id = fp->sb_id;
4826
8d9c5f34
EG
4827 context->xstorm_st_context.tx_bd_page_base_hi =
4828 U64_HI(fp->tx_desc_mapping);
4829 context->xstorm_st_context.tx_bd_page_base_lo =
4830 U64_LO(fp->tx_desc_mapping);
ca00392c 4831 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 4832 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea
ET
4833 }
4834}
4835
4836static void bnx2x_init_ind_table(struct bnx2x *bp)
4837{
26c8fa4d 4838 int func = BP_FUNC(bp);
a2fbb9ea
ET
4839 int i;
4840
555f6c78 4841 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
4842 return;
4843
555f6c78
EG
4844 DP(NETIF_MSG_IFUP,
4845 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4846 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4847 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4848 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
0626b899 4849 bp->fp->cl_id + (i % bp->num_rx_queues));
a2fbb9ea
ET
4850}
4851
49d66772
ET
4852static void bnx2x_set_client_config(struct bnx2x *bp)
4853{
49d66772 4854 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4855 int port = BP_PORT(bp);
4856 int i;
49d66772 4857
e7799c5f 4858 tstorm_client.mtu = bp->dev->mtu;
49d66772 4859 tstorm_client.config_flags =
de832a55
EG
4860 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4861 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 4862#ifdef BCM_VLAN
0c6671b0 4863 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 4864 tstorm_client.config_flags |=
8d9c5f34 4865 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
4866 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4867 }
4868#endif
49d66772
ET
4869
4870 for_each_queue(bp, i) {
de832a55
EG
4871 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4872
49d66772 4873 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4874 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4875 ((u32 *)&tstorm_client)[0]);
4876 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4877 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4878 ((u32 *)&tstorm_client)[1]);
4879 }
4880
34f80b04
EG
4881 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4882 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4883}
4884
a2fbb9ea
ET
4885static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4886{
a2fbb9ea 4887 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4888 int mode = bp->rx_mode;
4889 int mask = (1 << BP_L_ID(bp));
4890 int func = BP_FUNC(bp);
581ce43d 4891 int port = BP_PORT(bp);
a2fbb9ea 4892 int i;
581ce43d
EG
4893 /* All but management unicast packets should pass to the host as well */
4894 u32 llh_mask =
4895 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4896 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4897 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4898 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 4899
3196a88a 4900 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
4901
4902 switch (mode) {
4903 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4904 tstorm_mac_filter.ucast_drop_all = mask;
4905 tstorm_mac_filter.mcast_drop_all = mask;
4906 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 4907 break;
356e2385 4908
a2fbb9ea 4909 case BNX2X_RX_MODE_NORMAL:
34f80b04 4910 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4911 break;
356e2385 4912
a2fbb9ea 4913 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4914 tstorm_mac_filter.mcast_accept_all = mask;
4915 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4916 break;
356e2385 4917
a2fbb9ea 4918 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4919 tstorm_mac_filter.ucast_accept_all = mask;
4920 tstorm_mac_filter.mcast_accept_all = mask;
4921 tstorm_mac_filter.bcast_accept_all = mask;
581ce43d
EG
4922 /* pass management unicast packets as well */
4923 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 4924 break;
356e2385 4925
a2fbb9ea 4926 default:
34f80b04
EG
4927 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4928 break;
a2fbb9ea
ET
4929 }
4930
581ce43d
EG
4931 REG_WR(bp,
4932 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
4933 llh_mask);
4934
a2fbb9ea
ET
4935 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4936 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4937 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4938 ((u32 *)&tstorm_mac_filter)[i]);
4939
34f80b04 4940/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4941 ((u32 *)&tstorm_mac_filter)[i]); */
4942 }
a2fbb9ea 4943
49d66772
ET
4944 if (mode != BNX2X_RX_MODE_NONE)
4945 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4946}
4947
471de716
EG
4948static void bnx2x_init_internal_common(struct bnx2x *bp)
4949{
4950 int i;
4951
4952 /* Zero this manually as its initialization is
4953 currently missing in the initTool */
4954 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4955 REG_WR(bp, BAR_USTRORM_INTMEM +
4956 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4957}
4958
4959static void bnx2x_init_internal_port(struct bnx2x *bp)
4960{
4961 int port = BP_PORT(bp);
4962
ca00392c
EG
4963 REG_WR(bp,
4964 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
4965 REG_WR(bp,
4966 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
471de716
EG
4967 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4968 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4969}
4970
8a1c38d1
EG
4971/* Calculates the sum of vn_min_rates.
4972 It's needed for further normalizing of the min_rates.
4973 Returns:
4974 sum of vn_min_rates.
4975 or
4976 0 - if all the min_rates are 0.
4977 In the later case fainess algorithm should be deactivated.
4978 If not all min_rates are zero then those that are zeroes will be set to 1.
4979 */
4980static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4981{
4982 int all_zero = 1;
4983 int port = BP_PORT(bp);
4984 int vn;
4985
4986 bp->vn_weight_sum = 0;
4987 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4988 int func = 2*vn + port;
4989 u32 vn_cfg =
4990 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4991 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4992 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4993
4994 /* Skip hidden vns */
4995 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4996 continue;
4997
4998 /* If min rate is zero - set it to 1 */
4999 if (!vn_min_rate)
5000 vn_min_rate = DEF_MIN_RATE;
5001 else
5002 all_zero = 0;
5003
5004 bp->vn_weight_sum += vn_min_rate;
5005 }
5006
5007 /* ... only if all min rates are zeros - disable fairness */
5008 if (all_zero)
5009 bp->vn_weight_sum = 0;
5010}
5011
471de716 5012static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 5013{
a2fbb9ea
ET
5014 struct tstorm_eth_function_common_config tstorm_config = {0};
5015 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
5016 int port = BP_PORT(bp);
5017 int func = BP_FUNC(bp);
de832a55
EG
5018 int i, j;
5019 u32 offset;
471de716 5020 u16 max_agg_size;
a2fbb9ea
ET
5021
5022 if (is_multi(bp)) {
555f6c78 5023 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
5024 tstorm_config.rss_result_mask = MULTI_MASK;
5025 }
ca00392c
EG
5026
5027 /* Enable TPA if needed */
5028 if (bp->flags & TPA_ENABLE_FLAG)
5029 tstorm_config.config_flags |=
5030 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5031
8d9c5f34
EG
5032 if (IS_E1HMF(bp))
5033 tstorm_config.config_flags |=
5034 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 5035
34f80b04
EG
5036 tstorm_config.leading_client_id = BP_L_ID(bp);
5037
a2fbb9ea 5038 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5039 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
5040 (*(u32 *)&tstorm_config));
5041
c14423fe 5042 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
5043 bnx2x_set_storm_rx_mode(bp);
5044
de832a55
EG
5045 for_each_queue(bp, i) {
5046 u8 cl_id = bp->fp[i].cl_id;
5047
5048 /* reset xstorm per client statistics */
5049 offset = BAR_XSTRORM_INTMEM +
5050 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5051 for (j = 0;
5052 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5053 REG_WR(bp, offset + j*4, 0);
5054
5055 /* reset tstorm per client statistics */
5056 offset = BAR_TSTRORM_INTMEM +
5057 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5058 for (j = 0;
5059 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5060 REG_WR(bp, offset + j*4, 0);
5061
5062 /* reset ustorm per client statistics */
5063 offset = BAR_USTRORM_INTMEM +
5064 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5065 for (j = 0;
5066 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5067 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
5068 }
5069
5070 /* Init statistics related context */
34f80b04 5071 stats_flags.collect_eth = 1;
a2fbb9ea 5072
66e855f3 5073 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5074 ((u32 *)&stats_flags)[0]);
66e855f3 5075 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5076 ((u32 *)&stats_flags)[1]);
5077
66e855f3 5078 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5079 ((u32 *)&stats_flags)[0]);
66e855f3 5080 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5081 ((u32 *)&stats_flags)[1]);
5082
de832a55
EG
5083 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5084 ((u32 *)&stats_flags)[0]);
5085 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5086 ((u32 *)&stats_flags)[1]);
5087
66e855f3 5088 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5089 ((u32 *)&stats_flags)[0]);
66e855f3 5090 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5091 ((u32 *)&stats_flags)[1]);
5092
66e855f3
YG
5093 REG_WR(bp, BAR_XSTRORM_INTMEM +
5094 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5095 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5096 REG_WR(bp, BAR_XSTRORM_INTMEM +
5097 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5098 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5099
5100 REG_WR(bp, BAR_TSTRORM_INTMEM +
5101 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5102 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5103 REG_WR(bp, BAR_TSTRORM_INTMEM +
5104 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5105 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5106
de832a55
EG
5107 REG_WR(bp, BAR_USTRORM_INTMEM +
5108 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5109 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5110 REG_WR(bp, BAR_USTRORM_INTMEM +
5111 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5112 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5113
34f80b04
EG
5114 if (CHIP_IS_E1H(bp)) {
5115 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5116 IS_E1HMF(bp));
5117 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5118 IS_E1HMF(bp));
5119 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5120 IS_E1HMF(bp));
5121 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5122 IS_E1HMF(bp));
5123
7a9b2557
VZ
5124 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5125 bp->e1hov);
34f80b04
EG
5126 }
5127
4f40f2cb
EG
5128 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5129 max_agg_size =
5130 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5131 SGE_PAGE_SIZE * PAGES_PER_SGE),
5132 (u32)0xffff);
555f6c78 5133 for_each_rx_queue(bp, i) {
7a9b2557 5134 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5135
5136 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5137 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5138 U64_LO(fp->rx_comp_mapping));
5139 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5140 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5141 U64_HI(fp->rx_comp_mapping));
5142
ca00392c
EG
5143 /* Next page */
5144 REG_WR(bp, BAR_USTRORM_INTMEM +
5145 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5146 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5147 REG_WR(bp, BAR_USTRORM_INTMEM +
5148 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5149 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5150
7a9b2557 5151 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5152 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5153 max_agg_size);
5154 }
8a1c38d1 5155
1c06328c
EG
5156 /* dropless flow control */
5157 if (CHIP_IS_E1H(bp)) {
5158 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5159
5160 rx_pause.bd_thr_low = 250;
5161 rx_pause.cqe_thr_low = 250;
5162 rx_pause.cos = 1;
5163 rx_pause.sge_thr_low = 0;
5164 rx_pause.bd_thr_high = 350;
5165 rx_pause.cqe_thr_high = 350;
5166 rx_pause.sge_thr_high = 0;
5167
5168 for_each_rx_queue(bp, i) {
5169 struct bnx2x_fastpath *fp = &bp->fp[i];
5170
5171 if (!fp->disable_tpa) {
5172 rx_pause.sge_thr_low = 150;
5173 rx_pause.sge_thr_high = 250;
5174 }
5175
5176
5177 offset = BAR_USTRORM_INTMEM +
5178 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5179 fp->cl_id);
5180 for (j = 0;
5181 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5182 j++)
5183 REG_WR(bp, offset + j*4,
5184 ((u32 *)&rx_pause)[j]);
5185 }
5186 }
5187
8a1c38d1
EG
5188 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5189
5190 /* Init rate shaping and fairness contexts */
5191 if (IS_E1HMF(bp)) {
5192 int vn;
5193
5194 /* During init there is no active link
5195 Until link is up, set link rate to 10Gbps */
5196 bp->link_vars.line_speed = SPEED_10000;
5197 bnx2x_init_port_minmax(bp);
5198
5199 bnx2x_calc_vn_weight_sum(bp);
5200
5201 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5202 bnx2x_init_vn_minmax(bp, 2*vn + port);
5203
5204 /* Enable rate shaping and fairness */
5205 bp->cmng.flags.cmng_enables =
5206 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5207 if (bp->vn_weight_sum)
5208 bp->cmng.flags.cmng_enables |=
5209 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5210 else
5211 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5212 " fairness will be disabled\n");
5213 } else {
5214 /* rate shaping and fairness are disabled */
5215 DP(NETIF_MSG_IFUP,
5216 "single function mode minmax will be disabled\n");
5217 }
5218
5219
5220 /* Store it to internal memory */
5221 if (bp->port.pmf)
5222 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5223 REG_WR(bp, BAR_XSTRORM_INTMEM +
5224 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5225 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5226}
5227
471de716
EG
5228static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5229{
5230 switch (load_code) {
5231 case FW_MSG_CODE_DRV_LOAD_COMMON:
5232 bnx2x_init_internal_common(bp);
5233 /* no break */
5234
5235 case FW_MSG_CODE_DRV_LOAD_PORT:
5236 bnx2x_init_internal_port(bp);
5237 /* no break */
5238
5239 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5240 bnx2x_init_internal_func(bp);
5241 break;
5242
5243 default:
5244 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5245 break;
5246 }
5247}
5248
5249static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5250{
5251 int i;
5252
5253 for_each_queue(bp, i) {
5254 struct bnx2x_fastpath *fp = &bp->fp[i];
5255
34f80b04 5256 fp->bp = bp;
a2fbb9ea 5257 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5258 fp->index = i;
34f80b04
EG
5259 fp->cl_id = BP_L_ID(bp) + i;
5260 fp->sb_id = fp->cl_id;
ca00392c
EG
5261 /* Suitable Rx and Tx SBs are served by the same client */
5262 if (i >= bp->num_rx_queues)
5263 fp->cl_id -= bp->num_rx_queues;
34f80b04 5264 DP(NETIF_MSG_IFUP,
f5372251
EG
5265 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5266 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5267 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5268 fp->sb_id);
5c862848 5269 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5270 }
5271
16119785
EG
5272 /* ensure status block indices were read */
5273 rmb();
5274
5275
5c862848
EG
5276 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5277 DEF_SB_ID);
5278 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5279 bnx2x_update_coalesce(bp);
5280 bnx2x_init_rx_rings(bp);
5281 bnx2x_init_tx_ring(bp);
5282 bnx2x_init_sp_ring(bp);
5283 bnx2x_init_context(bp);
471de716 5284 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5285 bnx2x_init_ind_table(bp);
0ef00459
EG
5286 bnx2x_stats_init(bp);
5287
5288 /* At this point, we are ready for interrupts */
5289 atomic_set(&bp->intr_sem, 0);
5290
5291 /* flush all before enabling interrupts */
5292 mb();
5293 mmiowb();
5294
615f8fd9 5295 bnx2x_int_enable(bp);
eb8da205
EG
5296
5297 /* Check for SPIO5 */
5298 bnx2x_attn_int_deasserted0(bp,
5299 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5300 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
5301}
5302
5303/* end of nic init */
5304
5305/*
5306 * gzip service functions
5307 */
5308
5309static int bnx2x_gunzip_init(struct bnx2x *bp)
5310{
5311 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5312 &bp->gunzip_mapping);
5313 if (bp->gunzip_buf == NULL)
5314 goto gunzip_nomem1;
5315
5316 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5317 if (bp->strm == NULL)
5318 goto gunzip_nomem2;
5319
5320 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5321 GFP_KERNEL);
5322 if (bp->strm->workspace == NULL)
5323 goto gunzip_nomem3;
5324
5325 return 0;
5326
5327gunzip_nomem3:
5328 kfree(bp->strm);
5329 bp->strm = NULL;
5330
5331gunzip_nomem2:
5332 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5333 bp->gunzip_mapping);
5334 bp->gunzip_buf = NULL;
5335
5336gunzip_nomem1:
5337 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5338 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
5339 return -ENOMEM;
5340}
5341
5342static void bnx2x_gunzip_end(struct bnx2x *bp)
5343{
5344 kfree(bp->strm->workspace);
5345
5346 kfree(bp->strm);
5347 bp->strm = NULL;
5348
5349 if (bp->gunzip_buf) {
5350 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5351 bp->gunzip_mapping);
5352 bp->gunzip_buf = NULL;
5353 }
5354}
5355
94a78b79 5356static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
5357{
5358 int n, rc;
5359
5360 /* check gzip header */
94a78b79
VZ
5361 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5362 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 5363 return -EINVAL;
94a78b79 5364 }
a2fbb9ea
ET
5365
5366 n = 10;
5367
34f80b04 5368#define FNAME 0x8
a2fbb9ea
ET
5369
5370 if (zbuf[3] & FNAME)
5371 while ((zbuf[n++] != 0) && (n < len));
5372
94a78b79 5373 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
5374 bp->strm->avail_in = len - n;
5375 bp->strm->next_out = bp->gunzip_buf;
5376 bp->strm->avail_out = FW_BUF_SIZE;
5377
5378 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5379 if (rc != Z_OK)
5380 return rc;
5381
5382 rc = zlib_inflate(bp->strm, Z_FINISH);
5383 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5384 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5385 bp->dev->name, bp->strm->msg);
5386
5387 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5388 if (bp->gunzip_outlen & 0x3)
5389 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5390 " gunzip_outlen (%d) not aligned\n",
5391 bp->dev->name, bp->gunzip_outlen);
5392 bp->gunzip_outlen >>= 2;
5393
5394 zlib_inflateEnd(bp->strm);
5395
5396 if (rc == Z_STREAM_END)
5397 return 0;
5398
5399 return rc;
5400}
5401
5402/* nic load/unload */
5403
5404/*
34f80b04 5405 * General service functions
a2fbb9ea
ET
5406 */
5407
5408/* send a NIG loopback debug packet */
5409static void bnx2x_lb_pckt(struct bnx2x *bp)
5410{
a2fbb9ea 5411 u32 wb_write[3];
a2fbb9ea
ET
5412
5413 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5414 wb_write[0] = 0x55555555;
5415 wb_write[1] = 0x55555555;
34f80b04 5416 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5417 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5418
5419 /* NON-IP protocol */
a2fbb9ea
ET
5420 wb_write[0] = 0x09000000;
5421 wb_write[1] = 0x55555555;
34f80b04 5422 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5423 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5424}
5425
5426/* some of the internal memories
5427 * are not directly readable from the driver
5428 * to test them we send debug packets
5429 */
5430static int bnx2x_int_mem_test(struct bnx2x *bp)
5431{
5432 int factor;
5433 int count, i;
5434 u32 val = 0;
5435
ad8d3948 5436 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5437 factor = 120;
ad8d3948
EG
5438 else if (CHIP_REV_IS_EMUL(bp))
5439 factor = 200;
5440 else
a2fbb9ea 5441 factor = 1;
a2fbb9ea
ET
5442
5443 DP(NETIF_MSG_HW, "start part1\n");
5444
5445 /* Disable inputs of parser neighbor blocks */
5446 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5447 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5448 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5449 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5450
5451 /* Write 0 to parser credits for CFC search request */
5452 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5453
5454 /* send Ethernet packet */
5455 bnx2x_lb_pckt(bp);
5456
5457 /* TODO do i reset NIG statistic? */
5458 /* Wait until NIG register shows 1 packet of size 0x10 */
5459 count = 1000 * factor;
5460 while (count) {
34f80b04 5461
a2fbb9ea
ET
5462 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5463 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5464 if (val == 0x10)
5465 break;
5466
5467 msleep(10);
5468 count--;
5469 }
5470 if (val != 0x10) {
5471 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5472 return -1;
5473 }
5474
5475 /* Wait until PRS register shows 1 packet */
5476 count = 1000 * factor;
5477 while (count) {
5478 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5479 if (val == 1)
5480 break;
5481
5482 msleep(10);
5483 count--;
5484 }
5485 if (val != 0x1) {
5486 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5487 return -2;
5488 }
5489
5490 /* Reset and init BRB, PRS */
34f80b04 5491 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5492 msleep(50);
34f80b04 5493 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5494 msleep(50);
94a78b79
VZ
5495 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5496 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5497
5498 DP(NETIF_MSG_HW, "part2\n");
5499
5500 /* Disable inputs of parser neighbor blocks */
5501 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5502 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5503 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5504 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5505
5506 /* Write 0 to parser credits for CFC search request */
5507 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5508
5509 /* send 10 Ethernet packets */
5510 for (i = 0; i < 10; i++)
5511 bnx2x_lb_pckt(bp);
5512
5513 /* Wait until NIG register shows 10 + 1
5514 packets of size 11*0x10 = 0xb0 */
5515 count = 1000 * factor;
5516 while (count) {
34f80b04 5517
a2fbb9ea
ET
5518 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5519 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5520 if (val == 0xb0)
5521 break;
5522
5523 msleep(10);
5524 count--;
5525 }
5526 if (val != 0xb0) {
5527 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5528 return -3;
5529 }
5530
5531 /* Wait until PRS register shows 2 packets */
5532 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5533 if (val != 2)
5534 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5535
5536 /* Write 1 to parser credits for CFC search request */
5537 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5538
5539 /* Wait until PRS register shows 3 packets */
5540 msleep(10 * factor);
5541 /* Wait until NIG register shows 1 packet of size 0x10 */
5542 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5543 if (val != 3)
5544 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5545
5546 /* clear NIG EOP FIFO */
5547 for (i = 0; i < 11; i++)
5548 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5549 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5550 if (val != 1) {
5551 BNX2X_ERR("clear of NIG failed\n");
5552 return -4;
5553 }
5554
5555 /* Reset and init BRB, PRS, NIG */
5556 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5557 msleep(50);
5558 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5559 msleep(50);
94a78b79
VZ
5560 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5561 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5562#ifndef BCM_ISCSI
5563 /* set NIC mode */
5564 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5565#endif
5566
5567 /* Enable inputs of parser neighbor blocks */
5568 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5569 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5570 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5571 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5572
5573 DP(NETIF_MSG_HW, "done\n");
5574
5575 return 0; /* OK */
5576}
5577
5578static void enable_blocks_attention(struct bnx2x *bp)
5579{
5580 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5581 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5582 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5583 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5584 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5585 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5586 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5587 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5588 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5589/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5590/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5591 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5592 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5593 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5594/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5595/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5596 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5597 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5598 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5599 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5600/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5601/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5602 if (CHIP_REV_IS_FPGA(bp))
5603 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5604 else
5605 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5606 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5607 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5608 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5609/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5610/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5611 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5612 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5613/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5614 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5615}
5616
34f80b04 5617
81f75bbf
EG
5618static void bnx2x_reset_common(struct bnx2x *bp)
5619{
5620 /* reset_common */
5621 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5622 0xd3ffff7f);
5623 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5624}
5625
fd4ef40d
EG
5626
5627static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5628{
5629 u32 val;
5630 u8 port;
5631 u8 is_required = 0;
5632
5633 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5634 SHARED_HW_CFG_FAN_FAILURE_MASK;
5635
5636 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5637 is_required = 1;
5638
5639 /*
5640 * The fan failure mechanism is usually related to the PHY type since
5641 * the power consumption of the board is affected by the PHY. Currently,
5642 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5643 */
5644 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5645 for (port = PORT_0; port < PORT_MAX; port++) {
5646 u32 phy_type =
5647 SHMEM_RD(bp, dev_info.port_hw_config[port].
5648 external_phy_config) &
5649 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5650 is_required |=
5651 ((phy_type ==
5652 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
5653 (phy_type ==
5654 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
5655 (phy_type ==
5656 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5657 }
5658
5659 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5660
5661 if (is_required == 0)
5662 return;
5663
5664 /* Fan failure is indicated by SPIO 5 */
5665 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5666 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5667
5668 /* set to active low mode */
5669 val = REG_RD(bp, MISC_REG_SPIO_INT);
5670 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5671 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5672 REG_WR(bp, MISC_REG_SPIO_INT, val);
5673
5674 /* enable interrupt to signal the IGU */
5675 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5676 val |= (1 << MISC_REGISTERS_SPIO_5);
5677 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5678}
5679
34f80b04 5680static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5681{
a2fbb9ea 5682 u32 val, i;
a2fbb9ea 5683
34f80b04 5684 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5685
81f75bbf 5686 bnx2x_reset_common(bp);
34f80b04
EG
5687 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5688 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5689
94a78b79 5690 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
5691 if (CHIP_IS_E1H(bp))
5692 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5693
34f80b04
EG
5694 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5695 msleep(30);
5696 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5697
94a78b79 5698 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
5699 if (CHIP_IS_E1(bp)) {
5700 /* enable HW interrupt from PXP on USDM overflow
5701 bit 16 on INT_MASK_0 */
5702 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5703 }
a2fbb9ea 5704
94a78b79 5705 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 5706 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5707
5708#ifdef __BIG_ENDIAN
34f80b04
EG
5709 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5710 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5711 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5712 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5713 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
5714 /* make sure this value is 0 */
5715 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
5716
5717/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5718 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5719 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5720 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5721 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5722#endif
5723
34f80b04 5724 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5725#ifdef BCM_ISCSI
34f80b04
EG
5726 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5727 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5728 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5729#endif
5730
34f80b04
EG
5731 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5732 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5733
34f80b04
EG
5734 /* let the HW do it's magic ... */
5735 msleep(100);
5736 /* finish PXP init */
5737 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5738 if (val != 1) {
5739 BNX2X_ERR("PXP2 CFG failed\n");
5740 return -EBUSY;
5741 }
5742 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5743 if (val != 1) {
5744 BNX2X_ERR("PXP2 RD_INIT failed\n");
5745 return -EBUSY;
5746 }
a2fbb9ea 5747
34f80b04
EG
5748 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5749 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5750
94a78b79 5751 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 5752
34f80b04
EG
5753 /* clean the DMAE memory */
5754 bp->dmae_ready = 1;
5755 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5756
94a78b79
VZ
5757 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5758 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5759 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5760 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 5761
34f80b04
EG
5762 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5763 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5764 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5765 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5766
94a78b79 5767 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
34f80b04
EG
5768 /* soft reset pulse */
5769 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5770 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5771
5772#ifdef BCM_ISCSI
94a78b79 5773 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 5774#endif
a2fbb9ea 5775
94a78b79 5776 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
5777 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5778 if (!CHIP_REV_IS_SLOW(bp)) {
5779 /* enable hw interrupt from doorbell Q */
5780 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5781 }
a2fbb9ea 5782
94a78b79
VZ
5783 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5784 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 5785 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
5786 /* set NIC mode */
5787 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
5788 if (CHIP_IS_E1H(bp))
5789 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5790
94a78b79
VZ
5791 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5792 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5793 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5794 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 5795
ca00392c
EG
5796 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5797 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5798 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5799 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 5800
94a78b79
VZ
5801 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5802 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5803 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5804 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 5805
34f80b04
EG
5806 /* sync semi rtc */
5807 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5808 0x80000000);
5809 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5810 0x80000000);
a2fbb9ea 5811
94a78b79
VZ
5812 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5813 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5814 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 5815
34f80b04
EG
5816 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5817 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5818 REG_WR(bp, i, 0xc0cac01a);
5819 /* TODO: replace with something meaningful */
5820 }
94a78b79 5821 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
34f80b04 5822 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5823
34f80b04
EG
5824 if (sizeof(union cdu_context) != 1024)
5825 /* we currently assume that a context is 1024 bytes */
5826 printk(KERN_ALERT PFX "please adjust the size of"
5827 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5828
94a78b79 5829 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
5830 val = (4 << 24) + (0 << 12) + 1024;
5831 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 5832
94a78b79 5833 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 5834 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5835 /* enable context validation interrupt from CFC */
5836 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5837
5838 /* set the thresholds to prevent CFC/CDU race */
5839 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5840
94a78b79
VZ
5841 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5842 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 5843
94a78b79 5844 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
5845 /* Reset PCIE errors for debug */
5846 REG_WR(bp, 0x2814, 0xffffffff);
5847 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5848
94a78b79 5849 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 5850 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 5851 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 5852 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 5853
94a78b79 5854 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
5855 if (CHIP_IS_E1H(bp)) {
5856 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5857 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5858 }
5859
5860 if (CHIP_REV_IS_SLOW(bp))
5861 msleep(200);
5862
5863 /* finish CFC init */
5864 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5865 if (val != 1) {
5866 BNX2X_ERR("CFC LL_INIT failed\n");
5867 return -EBUSY;
5868 }
5869 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5870 if (val != 1) {
5871 BNX2X_ERR("CFC AC_INIT failed\n");
5872 return -EBUSY;
5873 }
5874 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5875 if (val != 1) {
5876 BNX2X_ERR("CFC CAM_INIT failed\n");
5877 return -EBUSY;
5878 }
5879 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5880
34f80b04
EG
5881 /* read NIG statistic
5882 to see if this is our first up since powerup */
5883 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5884 val = *bnx2x_sp(bp, wb_data[0]);
5885
5886 /* do internal memory self test */
5887 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5888 BNX2X_ERR("internal mem self test failed\n");
5889 return -EBUSY;
5890 }
5891
35b19ba5 5892 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
5893 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5894 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5895 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 5896 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
5897 bp->port.need_hw_lock = 1;
5898 break;
5899
34f80b04
EG
5900 default:
5901 break;
5902 }
f1410647 5903
fd4ef40d
EG
5904 bnx2x_setup_fan_failure_detection(bp);
5905
34f80b04
EG
5906 /* clear PXP2 attentions */
5907 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5908
34f80b04 5909 enable_blocks_attention(bp);
a2fbb9ea 5910
6bbca910
YR
5911 if (!BP_NOMCP(bp)) {
5912 bnx2x_acquire_phy_lock(bp);
5913 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5914 bnx2x_release_phy_lock(bp);
5915 } else
5916 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5917
34f80b04
EG
5918 return 0;
5919}
a2fbb9ea 5920
34f80b04
EG
5921static int bnx2x_init_port(struct bnx2x *bp)
5922{
5923 int port = BP_PORT(bp);
94a78b79 5924 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 5925 u32 low, high;
34f80b04 5926 u32 val;
a2fbb9ea 5927
34f80b04
EG
5928 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5929
5930 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 5931
94a78b79 5932 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 5933 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
5934
5935 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5936 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5937 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
a2fbb9ea
ET
5938#ifdef BCM_ISCSI
5939 /* Port0 1
5940 * Port1 385 */
5941 i++;
5942 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5943 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5944 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5945 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5946
5947 /* Port0 2
5948 * Port1 386 */
5949 i++;
5950 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5951 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5952 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5953 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5954
5955 /* Port0 3
5956 * Port1 387 */
5957 i++;
5958 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5959 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5960 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5961 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5962#endif
94a78b79 5963 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 5964
a2fbb9ea
ET
5965#ifdef BCM_ISCSI
5966 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5967 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5968
94a78b79 5969 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
a2fbb9ea 5970#endif
94a78b79 5971 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 5972
94a78b79 5973 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
5974 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5975 /* no pause for emulation and FPGA */
5976 low = 0;
5977 high = 513;
5978 } else {
5979 if (IS_E1HMF(bp))
5980 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5981 else if (bp->dev->mtu > 4096) {
5982 if (bp->flags & ONE_PORT_FLAG)
5983 low = 160;
5984 else {
5985 val = bp->dev->mtu;
5986 /* (24*1024 + val*4)/256 */
5987 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5988 }
5989 } else
5990 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5991 high = low + 56; /* 14*1024/256 */
5992 }
5993 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5994 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5995
5996
94a78b79 5997 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 5998
94a78b79 5999 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 6000 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 6001 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 6002 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 6003
94a78b79
VZ
6004 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6005 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6006 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6007 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 6008
94a78b79 6009 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 6010 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 6011
94a78b79 6012 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
6013
6014 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 6015 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
6016
6017 /* update threshold */
34f80b04 6018 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 6019 /* update init credit */
34f80b04 6020 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
6021
6022 /* probe changes */
34f80b04 6023 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 6024 msleep(5);
34f80b04 6025 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
6026
6027#ifdef BCM_ISCSI
6028 /* tell the searcher where the T2 table is */
6029 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6030
6031 wb_write[0] = U64_LO(bp->t2_mapping);
6032 wb_write[1] = U64_HI(bp->t2_mapping);
6033 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6034 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6035 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6036 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6037
6038 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
a2fbb9ea 6039#endif
94a78b79 6040 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 6041 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
6042
6043 if (CHIP_IS_E1(bp)) {
6044 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6045 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6046 }
94a78b79 6047 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6048
94a78b79 6049 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6050 /* init aeu_mask_attn_func_0/1:
6051 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6052 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6053 * bits 4-7 are used for "per vn group attention" */
6054 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6055 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6056
94a78b79 6057 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 6058 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 6059 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 6060 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 6061 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6062
94a78b79 6063 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6064
6065 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6066
6067 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6068 /* 0x2 disable e1hov, 0x1 enable */
6069 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6070 (IS_E1HMF(bp) ? 0x1 : 0x2));
6071
1c06328c
EG
6072 /* support pause requests from USDM, TSDM and BRB */
6073 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6074
6075 {
6076 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6077 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6078 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6079 }
34f80b04
EG
6080 }
6081
94a78b79 6082 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 6083 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6084
35b19ba5 6085 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6086 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6087 {
6088 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6089
6090 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6091 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6092
6093 /* The GPIO should be swapped if the swap register is
6094 set and active */
6095 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6096 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6097
6098 /* Select function upon port-swap configuration */
6099 if (port == 0) {
6100 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6101 aeu_gpio_mask = (swap_val && swap_override) ?
6102 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6103 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6104 } else {
6105 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6106 aeu_gpio_mask = (swap_val && swap_override) ?
6107 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6108 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6109 }
6110 val = REG_RD(bp, offset);
6111 /* add GPIO3 to group */
6112 val |= aeu_gpio_mask;
6113 REG_WR(bp, offset, val);
6114 }
6115 break;
6116
35b19ba5 6117 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6118 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6119 /* add SPIO 5 to group 0 */
4d295db0
EG
6120 {
6121 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6122 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6123 val = REG_RD(bp, reg_addr);
f1410647 6124 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6125 REG_WR(bp, reg_addr, val);
6126 }
f1410647
ET
6127 break;
6128
6129 default:
6130 break;
6131 }
6132
c18487ee 6133 bnx2x__link_reset(bp);
a2fbb9ea 6134
34f80b04
EG
6135 return 0;
6136}
6137
6138#define ILT_PER_FUNC (768/2)
6139#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6140/* the phys address is shifted right 12 bits and has an added
6141 1=valid bit added to the 53rd bit
6142 then since this is a wide register(TM)
6143 we split it into two 32 bit writes
6144 */
6145#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6146#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6147#define PXP_ONE_ILT(x) (((x) << 10) | x)
6148#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6149
6150#define CNIC_ILT_LINES 0
6151
6152static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6153{
6154 int reg;
6155
6156 if (CHIP_IS_E1H(bp))
6157 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6158 else /* E1 */
6159 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6160
6161 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6162}
6163
6164static int bnx2x_init_func(struct bnx2x *bp)
6165{
6166 int port = BP_PORT(bp);
6167 int func = BP_FUNC(bp);
8badd27a 6168 u32 addr, val;
34f80b04
EG
6169 int i;
6170
6171 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6172
8badd27a
EG
6173 /* set MSI reconfigure capability */
6174 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6175 val = REG_RD(bp, addr);
6176 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6177 REG_WR(bp, addr, val);
6178
34f80b04
EG
6179 i = FUNC_ILT_BASE(func);
6180
6181 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6182 if (CHIP_IS_E1H(bp)) {
6183 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6184 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6185 } else /* E1 */
6186 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6187 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6188
6189
6190 if (CHIP_IS_E1H(bp)) {
6191 for (i = 0; i < 9; i++)
6192 bnx2x_init_block(bp,
94a78b79 6193 cm_blocks[i], FUNC0_STAGE + func);
34f80b04
EG
6194
6195 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6196 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6197 }
6198
6199 /* HC init per function */
6200 if (CHIP_IS_E1H(bp)) {
6201 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6202
6203 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6204 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6205 }
94a78b79 6206 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 6207
c14423fe 6208 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6209 REG_WR(bp, 0x2114, 0xffffffff);
6210 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6211
34f80b04
EG
6212 return 0;
6213}
6214
6215static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6216{
6217 int i, rc = 0;
a2fbb9ea 6218
34f80b04
EG
6219 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6220 BP_FUNC(bp), load_code);
a2fbb9ea 6221
34f80b04
EG
6222 bp->dmae_ready = 0;
6223 mutex_init(&bp->dmae_mutex);
6224 bnx2x_gunzip_init(bp);
a2fbb9ea 6225
34f80b04
EG
6226 switch (load_code) {
6227 case FW_MSG_CODE_DRV_LOAD_COMMON:
6228 rc = bnx2x_init_common(bp);
6229 if (rc)
6230 goto init_hw_err;
6231 /* no break */
6232
6233 case FW_MSG_CODE_DRV_LOAD_PORT:
6234 bp->dmae_ready = 1;
6235 rc = bnx2x_init_port(bp);
6236 if (rc)
6237 goto init_hw_err;
6238 /* no break */
6239
6240 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6241 bp->dmae_ready = 1;
6242 rc = bnx2x_init_func(bp);
6243 if (rc)
6244 goto init_hw_err;
6245 break;
6246
6247 default:
6248 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6249 break;
6250 }
6251
6252 if (!BP_NOMCP(bp)) {
6253 int func = BP_FUNC(bp);
a2fbb9ea
ET
6254
6255 bp->fw_drv_pulse_wr_seq =
34f80b04 6256 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6257 DRV_PULSE_SEQ_MASK);
34f80b04
EG
6258 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6259 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6260 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6261 } else
6262 bp->func_stx = 0;
a2fbb9ea 6263
34f80b04
EG
6264 /* this needs to be done before gunzip end */
6265 bnx2x_zero_def_sb(bp);
6266 for_each_queue(bp, i)
6267 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6268
6269init_hw_err:
6270 bnx2x_gunzip_end(bp);
6271
6272 return rc;
a2fbb9ea
ET
6273}
6274
c14423fe 6275/* send the MCP a request, block until there is a reply */
4d295db0 6276u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
a2fbb9ea 6277{
34f80b04 6278 int func = BP_FUNC(bp);
f1410647
ET
6279 u32 seq = ++bp->fw_seq;
6280 u32 rc = 0;
19680c48
EG
6281 u32 cnt = 1;
6282 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6283
34f80b04 6284 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6285 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6286
19680c48
EG
6287 do {
6288 /* let the FW do it's magic ... */
6289 msleep(delay);
a2fbb9ea 6290
19680c48 6291 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6292
19680c48
EG
6293 /* Give the FW up to 2 second (200*10ms) */
6294 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6295
6296 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6297 cnt*delay, rc, seq);
a2fbb9ea
ET
6298
6299 /* is this a reply to our command? */
6300 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6301 rc &= FW_MSG_CODE_MASK;
f1410647 6302
a2fbb9ea
ET
6303 } else {
6304 /* FW BUG! */
6305 BNX2X_ERR("FW failed to respond!\n");
6306 bnx2x_fw_dump(bp);
6307 rc = 0;
6308 }
f1410647 6309
a2fbb9ea
ET
6310 return rc;
6311}
6312
6313static void bnx2x_free_mem(struct bnx2x *bp)
6314{
6315
6316#define BNX2X_PCI_FREE(x, y, size) \
6317 do { \
6318 if (x) { \
6319 pci_free_consistent(bp->pdev, size, x, y); \
6320 x = NULL; \
6321 y = 0; \
6322 } \
6323 } while (0)
6324
6325#define BNX2X_FREE(x) \
6326 do { \
6327 if (x) { \
6328 vfree(x); \
6329 x = NULL; \
6330 } \
6331 } while (0)
6332
6333 int i;
6334
6335 /* fastpath */
555f6c78 6336 /* Common */
a2fbb9ea
ET
6337 for_each_queue(bp, i) {
6338
555f6c78 6339 /* status blocks */
a2fbb9ea
ET
6340 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6341 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6342 sizeof(struct host_status_block));
555f6c78
EG
6343 }
6344 /* Rx */
6345 for_each_rx_queue(bp, i) {
a2fbb9ea 6346
555f6c78 6347 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6348 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6349 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6350 bnx2x_fp(bp, i, rx_desc_mapping),
6351 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6352
6353 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6354 bnx2x_fp(bp, i, rx_comp_mapping),
6355 sizeof(struct eth_fast_path_rx_cqe) *
6356 NUM_RCQ_BD);
a2fbb9ea 6357
7a9b2557 6358 /* SGE ring */
32626230 6359 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6360 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6361 bnx2x_fp(bp, i, rx_sge_mapping),
6362 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6363 }
555f6c78
EG
6364 /* Tx */
6365 for_each_tx_queue(bp, i) {
6366
6367 /* fastpath tx rings: tx_buf tx_desc */
6368 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6369 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6370 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6371 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6372 }
a2fbb9ea
ET
6373 /* end of fastpath */
6374
6375 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6376 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6377
6378 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6379 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
6380
6381#ifdef BCM_ISCSI
6382 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6383 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6384 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6385 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6386#endif
7a9b2557 6387 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6388
6389#undef BNX2X_PCI_FREE
6390#undef BNX2X_KFREE
6391}
6392
6393static int bnx2x_alloc_mem(struct bnx2x *bp)
6394{
6395
6396#define BNX2X_PCI_ALLOC(x, y, size) \
6397 do { \
6398 x = pci_alloc_consistent(bp->pdev, size, y); \
6399 if (x == NULL) \
6400 goto alloc_mem_err; \
6401 memset(x, 0, size); \
6402 } while (0)
6403
6404#define BNX2X_ALLOC(x, size) \
6405 do { \
6406 x = vmalloc(size); \
6407 if (x == NULL) \
6408 goto alloc_mem_err; \
6409 memset(x, 0, size); \
6410 } while (0)
6411
6412 int i;
6413
6414 /* fastpath */
555f6c78 6415 /* Common */
a2fbb9ea
ET
6416 for_each_queue(bp, i) {
6417 bnx2x_fp(bp, i, bp) = bp;
6418
555f6c78 6419 /* status blocks */
a2fbb9ea
ET
6420 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6421 &bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6422 sizeof(struct host_status_block));
555f6c78
EG
6423 }
6424 /* Rx */
6425 for_each_rx_queue(bp, i) {
a2fbb9ea 6426
555f6c78 6427 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6428 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6429 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6430 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6431 &bnx2x_fp(bp, i, rx_desc_mapping),
6432 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6433
6434 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6435 &bnx2x_fp(bp, i, rx_comp_mapping),
6436 sizeof(struct eth_fast_path_rx_cqe) *
6437 NUM_RCQ_BD);
6438
7a9b2557
VZ
6439 /* SGE ring */
6440 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6441 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6442 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6443 &bnx2x_fp(bp, i, rx_sge_mapping),
6444 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6445 }
555f6c78
EG
6446 /* Tx */
6447 for_each_tx_queue(bp, i) {
6448
555f6c78
EG
6449 /* fastpath tx rings: tx_buf tx_desc */
6450 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6451 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6452 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6453 &bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6454 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6455 }
a2fbb9ea
ET
6456 /* end of fastpath */
6457
6458 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6459 sizeof(struct host_def_status_block));
6460
6461 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6462 sizeof(struct bnx2x_slowpath));
6463
6464#ifdef BCM_ISCSI
6465 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6466
6467 /* Initialize T1 */
6468 for (i = 0; i < 64*1024; i += 64) {
6469 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6470 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6471 }
6472
6473 /* allocate searcher T2 table
6474 we allocate 1/4 of alloc num for T2
6475 (which is not entered into the ILT) */
6476 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6477
6478 /* Initialize T2 */
6479 for (i = 0; i < 16*1024; i += 64)
6480 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6481
c14423fe 6482 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
6483 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6484
6485 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6486 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6487
6488 /* QM queues (128*MAX_CONN) */
6489 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6490#endif
6491
6492 /* Slow path ring */
6493 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6494
6495 return 0;
6496
6497alloc_mem_err:
6498 bnx2x_free_mem(bp);
6499 return -ENOMEM;
6500
6501#undef BNX2X_PCI_ALLOC
6502#undef BNX2X_ALLOC
6503}
6504
6505static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6506{
6507 int i;
6508
555f6c78 6509 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6510 struct bnx2x_fastpath *fp = &bp->fp[i];
6511
6512 u16 bd_cons = fp->tx_bd_cons;
6513 u16 sw_prod = fp->tx_pkt_prod;
6514 u16 sw_cons = fp->tx_pkt_cons;
6515
a2fbb9ea
ET
6516 while (sw_cons != sw_prod) {
6517 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6518 sw_cons++;
6519 }
6520 }
6521}
6522
6523static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6524{
6525 int i, j;
6526
555f6c78 6527 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6528 struct bnx2x_fastpath *fp = &bp->fp[j];
6529
a2fbb9ea
ET
6530 for (i = 0; i < NUM_RX_BD; i++) {
6531 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6532 struct sk_buff *skb = rx_buf->skb;
6533
6534 if (skb == NULL)
6535 continue;
6536
6537 pci_unmap_single(bp->pdev,
6538 pci_unmap_addr(rx_buf, mapping),
356e2385 6539 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
6540
6541 rx_buf->skb = NULL;
6542 dev_kfree_skb(skb);
6543 }
7a9b2557 6544 if (!fp->disable_tpa)
32626230
EG
6545 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6546 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6547 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6548 }
6549}
6550
6551static void bnx2x_free_skbs(struct bnx2x *bp)
6552{
6553 bnx2x_free_tx_skbs(bp);
6554 bnx2x_free_rx_skbs(bp);
6555}
6556
6557static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6558{
34f80b04 6559 int i, offset = 1;
a2fbb9ea
ET
6560
6561 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6562 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6563 bp->msix_table[0].vector);
6564
6565 for_each_queue(bp, i) {
c14423fe 6566 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6567 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6568 bnx2x_fp(bp, i, state));
6569
34f80b04 6570 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6571 }
a2fbb9ea
ET
6572}
6573
6574static void bnx2x_free_irq(struct bnx2x *bp)
6575{
a2fbb9ea 6576 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6577 bnx2x_free_msix_irqs(bp);
6578 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6579 bp->flags &= ~USING_MSIX_FLAG;
6580
8badd27a
EG
6581 } else if (bp->flags & USING_MSI_FLAG) {
6582 free_irq(bp->pdev->irq, bp->dev);
6583 pci_disable_msi(bp->pdev);
6584 bp->flags &= ~USING_MSI_FLAG;
6585
a2fbb9ea
ET
6586 } else
6587 free_irq(bp->pdev->irq, bp->dev);
6588}
6589
6590static int bnx2x_enable_msix(struct bnx2x *bp)
6591{
8badd27a
EG
6592 int i, rc, offset = 1;
6593 int igu_vec = 0;
a2fbb9ea 6594
8badd27a
EG
6595 bp->msix_table[0].entry = igu_vec;
6596 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6597
34f80b04 6598 for_each_queue(bp, i) {
8badd27a 6599 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
6600 bp->msix_table[i + offset].entry = igu_vec;
6601 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6602 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6603 }
6604
34f80b04 6605 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6606 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6607 if (rc) {
8badd27a
EG
6608 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6609 return rc;
34f80b04 6610 }
8badd27a 6611
a2fbb9ea
ET
6612 bp->flags |= USING_MSIX_FLAG;
6613
6614 return 0;
a2fbb9ea
ET
6615}
6616
a2fbb9ea
ET
6617static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6618{
34f80b04 6619 int i, rc, offset = 1;
a2fbb9ea 6620
a2fbb9ea
ET
6621 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6622 bp->dev->name, bp->dev);
a2fbb9ea
ET
6623 if (rc) {
6624 BNX2X_ERR("request sp irq failed\n");
6625 return -EBUSY;
6626 }
6627
6628 for_each_queue(bp, i) {
555f6c78
EG
6629 struct bnx2x_fastpath *fp = &bp->fp[i];
6630
ca00392c
EG
6631 if (i < bp->num_rx_queues)
6632 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6633 else
6634 sprintf(fp->name, "%s-tx-%d",
6635 bp->dev->name, i - bp->num_rx_queues);
6636
34f80b04 6637 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6638 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6639 if (rc) {
555f6c78 6640 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
6641 bnx2x_free_msix_irqs(bp);
6642 return -EBUSY;
6643 }
6644
555f6c78 6645 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6646 }
6647
555f6c78 6648 i = BNX2X_NUM_QUEUES(bp);
ca00392c
EG
6649 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
6650 " ... fp[%d] %d\n",
6651 bp->dev->name, bp->msix_table[0].vector,
6652 0, bp->msix_table[offset].vector,
6653 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 6654
a2fbb9ea 6655 return 0;
a2fbb9ea
ET
6656}
6657
8badd27a
EG
6658static int bnx2x_enable_msi(struct bnx2x *bp)
6659{
6660 int rc;
6661
6662 rc = pci_enable_msi(bp->pdev);
6663 if (rc) {
6664 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6665 return -1;
6666 }
6667 bp->flags |= USING_MSI_FLAG;
6668
6669 return 0;
6670}
6671
a2fbb9ea
ET
6672static int bnx2x_req_irq(struct bnx2x *bp)
6673{
8badd27a 6674 unsigned long flags;
34f80b04 6675 int rc;
a2fbb9ea 6676
8badd27a
EG
6677 if (bp->flags & USING_MSI_FLAG)
6678 flags = 0;
6679 else
6680 flags = IRQF_SHARED;
6681
6682 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6683 bp->dev->name, bp->dev);
a2fbb9ea
ET
6684 if (!rc)
6685 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6686
6687 return rc;
a2fbb9ea
ET
6688}
6689
65abd74d
YG
6690static void bnx2x_napi_enable(struct bnx2x *bp)
6691{
6692 int i;
6693
555f6c78 6694 for_each_rx_queue(bp, i)
65abd74d
YG
6695 napi_enable(&bnx2x_fp(bp, i, napi));
6696}
6697
6698static void bnx2x_napi_disable(struct bnx2x *bp)
6699{
6700 int i;
6701
555f6c78 6702 for_each_rx_queue(bp, i)
65abd74d
YG
6703 napi_disable(&bnx2x_fp(bp, i, napi));
6704}
6705
6706static void bnx2x_netif_start(struct bnx2x *bp)
6707{
e1510706
EG
6708 int intr_sem;
6709
6710 intr_sem = atomic_dec_and_test(&bp->intr_sem);
6711 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6712
6713 if (intr_sem) {
65abd74d 6714 if (netif_running(bp->dev)) {
65abd74d
YG
6715 bnx2x_napi_enable(bp);
6716 bnx2x_int_enable(bp);
555f6c78
EG
6717 if (bp->state == BNX2X_STATE_OPEN)
6718 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
6719 }
6720 }
6721}
6722
f8ef6e44 6723static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6724{
f8ef6e44 6725 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6726 bnx2x_napi_disable(bp);
762d5f6c
EG
6727 netif_tx_disable(bp->dev);
6728 bp->dev->trans_start = jiffies; /* prevent tx timeout */
65abd74d
YG
6729}
6730
a2fbb9ea
ET
6731/*
6732 * Init service functions
6733 */
6734
3101c2bc 6735static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
a2fbb9ea
ET
6736{
6737 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6738 int port = BP_PORT(bp);
a2fbb9ea
ET
6739
6740 /* CAM allocation
6741 * unicasts 0-31:port0 32-63:port1
6742 * multicast 64-127:port0 128-191:port1
6743 */
8d9c5f34 6744 config->hdr.length = 2;
af246401 6745 config->hdr.offset = port ? 32 : 0;
0626b899 6746 config->hdr.client_id = bp->fp->cl_id;
a2fbb9ea
ET
6747 config->hdr.reserved1 = 0;
6748
6749 /* primary MAC */
6750 config->config_table[0].cam_entry.msb_mac_addr =
6751 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6752 config->config_table[0].cam_entry.middle_mac_addr =
6753 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6754 config->config_table[0].cam_entry.lsb_mac_addr =
6755 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6756 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6757 if (set)
6758 config->config_table[0].target_table_entry.flags = 0;
6759 else
6760 CAM_INVALIDATE(config->config_table[0]);
ca00392c
EG
6761 config->config_table[0].target_table_entry.clients_bit_vector =
6762 cpu_to_le32(1 << BP_L_ID(bp));
a2fbb9ea
ET
6763 config->config_table[0].target_table_entry.vlan_id = 0;
6764
3101c2bc
YG
6765 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6766 (set ? "setting" : "clearing"),
a2fbb9ea
ET
6767 config->config_table[0].cam_entry.msb_mac_addr,
6768 config->config_table[0].cam_entry.middle_mac_addr,
6769 config->config_table[0].cam_entry.lsb_mac_addr);
6770
6771 /* broadcast */
4781bfad
EG
6772 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6773 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6774 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
34f80b04 6775 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6776 if (set)
6777 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6778 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
3101c2bc
YG
6779 else
6780 CAM_INVALIDATE(config->config_table[1]);
ca00392c
EG
6781 config->config_table[1].target_table_entry.clients_bit_vector =
6782 cpu_to_le32(1 << BP_L_ID(bp));
a2fbb9ea
ET
6783 config->config_table[1].target_table_entry.vlan_id = 0;
6784
6785 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6786 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6787 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6788}
6789
3101c2bc 6790static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
34f80b04
EG
6791{
6792 struct mac_configuration_cmd_e1h *config =
6793 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6794
34f80b04
EG
6795 /* CAM allocation for E1H
6796 * unicasts: by func number
6797 * multicast: 20+FUNC*20, 20 each
6798 */
8d9c5f34 6799 config->hdr.length = 1;
34f80b04 6800 config->hdr.offset = BP_FUNC(bp);
0626b899 6801 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
6802 config->hdr.reserved1 = 0;
6803
6804 /* primary MAC */
6805 config->config_table[0].msb_mac_addr =
6806 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6807 config->config_table[0].middle_mac_addr =
6808 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6809 config->config_table[0].lsb_mac_addr =
6810 swab16(*(u16 *)&bp->dev->dev_addr[4]);
ca00392c
EG
6811 config->config_table[0].clients_bit_vector =
6812 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
6813 config->config_table[0].vlan_id = 0;
6814 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
6815 if (set)
6816 config->config_table[0].flags = BP_PORT(bp);
6817 else
6818 config->config_table[0].flags =
6819 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6820
3101c2bc
YG
6821 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6822 (set ? "setting" : "clearing"),
34f80b04
EG
6823 config->config_table[0].msb_mac_addr,
6824 config->config_table[0].middle_mac_addr,
6825 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6826
6827 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6828 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6829 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6830}
6831
a2fbb9ea
ET
6832static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6833 int *state_p, int poll)
6834{
6835 /* can take a while if any port is running */
8b3a0f0b 6836 int cnt = 5000;
a2fbb9ea 6837
c14423fe
ET
6838 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6839 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6840
6841 might_sleep();
34f80b04 6842 while (cnt--) {
a2fbb9ea
ET
6843 if (poll) {
6844 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
6845 /* if index is different from 0
6846 * the reply for some commands will
3101c2bc 6847 * be on the non default queue
a2fbb9ea
ET
6848 */
6849 if (idx)
6850 bnx2x_rx_int(&bp->fp[idx], 10);
6851 }
a2fbb9ea 6852
3101c2bc 6853 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
6854 if (*state_p == state) {
6855#ifdef BNX2X_STOP_ON_ERROR
6856 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6857#endif
a2fbb9ea 6858 return 0;
8b3a0f0b 6859 }
a2fbb9ea 6860
a2fbb9ea 6861 msleep(1);
a2fbb9ea
ET
6862 }
6863
a2fbb9ea 6864 /* timeout! */
49d66772
ET
6865 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6866 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6867#ifdef BNX2X_STOP_ON_ERROR
6868 bnx2x_panic();
6869#endif
a2fbb9ea 6870
49d66772 6871 return -EBUSY;
a2fbb9ea
ET
6872}
6873
6874static int bnx2x_setup_leading(struct bnx2x *bp)
6875{
34f80b04 6876 int rc;
a2fbb9ea 6877
c14423fe 6878 /* reset IGU state */
34f80b04 6879 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
6880
6881 /* SETUP ramrod */
6882 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6883
34f80b04
EG
6884 /* Wait for completion */
6885 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6886
34f80b04 6887 return rc;
a2fbb9ea
ET
6888}
6889
6890static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6891{
555f6c78
EG
6892 struct bnx2x_fastpath *fp = &bp->fp[index];
6893
a2fbb9ea 6894 /* reset IGU state */
555f6c78 6895 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6896
228241eb 6897 /* SETUP ramrod */
555f6c78
EG
6898 fp->state = BNX2X_FP_STATE_OPENING;
6899 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6900 fp->cl_id, 0);
a2fbb9ea
ET
6901
6902 /* Wait for completion */
6903 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 6904 &(fp->state), 0);
a2fbb9ea
ET
6905}
6906
a2fbb9ea 6907static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 6908
ca00392c
EG
6909static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
6910 int *num_tx_queues_out)
6911{
6912 int _num_rx_queues = 0, _num_tx_queues = 0;
6913
6914 switch (bp->multi_mode) {
6915 case ETH_RSS_MODE_DISABLED:
6916 _num_rx_queues = 1;
6917 _num_tx_queues = 1;
6918 break;
6919
6920 case ETH_RSS_MODE_REGULAR:
6921 if (num_rx_queues)
6922 _num_rx_queues = min_t(u32, num_rx_queues,
6923 BNX2X_MAX_QUEUES(bp));
6924 else
6925 _num_rx_queues = min_t(u32, num_online_cpus(),
6926 BNX2X_MAX_QUEUES(bp));
6927
6928 if (num_tx_queues)
6929 _num_tx_queues = min_t(u32, num_tx_queues,
6930 BNX2X_MAX_QUEUES(bp));
6931 else
6932 _num_tx_queues = min_t(u32, num_online_cpus(),
6933 BNX2X_MAX_QUEUES(bp));
6934
6935 /* There must be not more Tx queues than Rx queues */
6936 if (_num_tx_queues > _num_rx_queues) {
6937 BNX2X_ERR("number of tx queues (%d) > "
6938 "number of rx queues (%d)"
6939 " defaulting to %d\n",
6940 _num_tx_queues, _num_rx_queues,
6941 _num_rx_queues);
6942 _num_tx_queues = _num_rx_queues;
6943 }
6944 break;
6945
6946
6947 default:
6948 _num_rx_queues = 1;
6949 _num_tx_queues = 1;
6950 break;
6951 }
6952
6953 *num_rx_queues_out = _num_rx_queues;
6954 *num_tx_queues_out = _num_tx_queues;
6955}
6956
6957static int bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 6958{
ca00392c 6959 int rc = 0;
a2fbb9ea 6960
8badd27a
EG
6961 switch (int_mode) {
6962 case INT_MODE_INTx:
6963 case INT_MODE_MSI:
ca00392c
EG
6964 bp->num_rx_queues = 1;
6965 bp->num_tx_queues = 1;
6966 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
8badd27a
EG
6967 break;
6968
6969 case INT_MODE_MSIX:
6970 default:
ca00392c
EG
6971 /* Set interrupt mode according to bp->multi_mode value */
6972 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
6973 &bp->num_tx_queues);
6974
6975 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
555f6c78 6976 bp->num_rx_queues, bp->num_tx_queues);
ca00392c 6977
2dfe0e1f
EG
6978 /* if we can't use MSI-X we only need one fp,
6979 * so try to enable MSI-X with the requested number of fp's
6980 * and fallback to MSI or legacy INTx with one fp
6981 */
ca00392c
EG
6982 rc = bnx2x_enable_msix(bp);
6983 if (rc) {
34f80b04 6984 /* failed to enable MSI-X */
555f6c78
EG
6985 if (bp->multi_mode)
6986 BNX2X_ERR("Multi requested but failed to "
ca00392c
EG
6987 "enable MSI-X (rx %d tx %d), "
6988 "set number of queues to 1\n",
6989 bp->num_rx_queues, bp->num_tx_queues);
6990 bp->num_rx_queues = 1;
6991 bp->num_tx_queues = 1;
a2fbb9ea 6992 }
8badd27a 6993 break;
a2fbb9ea 6994 }
555f6c78 6995 bp->dev->real_num_tx_queues = bp->num_tx_queues;
ca00392c 6996 return rc;
8badd27a
EG
6997}
6998
6999static void bnx2x_set_rx_mode(struct net_device *dev);
7000
7001/* must be called with rtnl_lock */
7002static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7003{
7004 u32 load_code;
ca00392c
EG
7005 int i, rc;
7006
8badd27a 7007#ifdef BNX2X_STOP_ON_ERROR
8badd27a
EG
7008 if (unlikely(bp->panic))
7009 return -EPERM;
7010#endif
7011
7012 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7013
ca00392c 7014 rc = bnx2x_set_int_mode(bp);
c14423fe 7015
a2fbb9ea
ET
7016 if (bnx2x_alloc_mem(bp))
7017 return -ENOMEM;
7018
555f6c78 7019 for_each_rx_queue(bp, i)
7a9b2557
VZ
7020 bnx2x_fp(bp, i, disable_tpa) =
7021 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7022
555f6c78 7023 for_each_rx_queue(bp, i)
2dfe0e1f
EG
7024 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7025 bnx2x_poll, 128);
7026
2dfe0e1f
EG
7027 bnx2x_napi_enable(bp);
7028
34f80b04
EG
7029 if (bp->flags & USING_MSIX_FLAG) {
7030 rc = bnx2x_req_msix_irqs(bp);
7031 if (rc) {
7032 pci_disable_msix(bp->pdev);
2dfe0e1f 7033 goto load_error1;
34f80b04
EG
7034 }
7035 } else {
ca00392c
EG
7036 /* Fall to INTx if failed to enable MSI-X due to lack of
7037 memory (in bnx2x_set_int_mode()) */
8badd27a
EG
7038 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7039 bnx2x_enable_msi(bp);
34f80b04
EG
7040 bnx2x_ack_int(bp);
7041 rc = bnx2x_req_irq(bp);
7042 if (rc) {
2dfe0e1f 7043 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a
EG
7044 if (bp->flags & USING_MSI_FLAG)
7045 pci_disable_msi(bp->pdev);
2dfe0e1f 7046 goto load_error1;
a2fbb9ea 7047 }
8badd27a
EG
7048 if (bp->flags & USING_MSI_FLAG) {
7049 bp->dev->irq = bp->pdev->irq;
7050 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7051 bp->dev->name, bp->pdev->irq);
7052 }
a2fbb9ea
ET
7053 }
7054
2dfe0e1f
EG
7055 /* Send LOAD_REQUEST command to MCP
7056 Returns the type of LOAD command:
7057 if it is the first port to be initialized
7058 common blocks should be initialized, otherwise - not
7059 */
7060 if (!BP_NOMCP(bp)) {
7061 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7062 if (!load_code) {
7063 BNX2X_ERR("MCP response failure, aborting\n");
7064 rc = -EBUSY;
7065 goto load_error2;
7066 }
7067 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7068 rc = -EBUSY; /* other port in diagnostic mode */
7069 goto load_error2;
7070 }
7071
7072 } else {
7073 int port = BP_PORT(bp);
7074
f5372251 7075 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f
EG
7076 load_count[0], load_count[1], load_count[2]);
7077 load_count[0]++;
7078 load_count[1 + port]++;
f5372251 7079 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f
EG
7080 load_count[0], load_count[1], load_count[2]);
7081 if (load_count[0] == 1)
7082 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7083 else if (load_count[1 + port] == 1)
7084 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7085 else
7086 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7087 }
7088
7089 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7090 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7091 bp->port.pmf = 1;
7092 else
7093 bp->port.pmf = 0;
7094 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7095
a2fbb9ea 7096 /* Initialize HW */
34f80b04
EG
7097 rc = bnx2x_init_hw(bp, load_code);
7098 if (rc) {
a2fbb9ea 7099 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 7100 goto load_error2;
a2fbb9ea
ET
7101 }
7102
a2fbb9ea 7103 /* Setup NIC internals and enable interrupts */
471de716 7104 bnx2x_nic_init(bp, load_code);
a2fbb9ea
ET
7105
7106 /* Send LOAD_DONE command to MCP */
34f80b04 7107 if (!BP_NOMCP(bp)) {
228241eb
ET
7108 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7109 if (!load_code) {
da5a662a 7110 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 7111 rc = -EBUSY;
2dfe0e1f 7112 goto load_error3;
a2fbb9ea
ET
7113 }
7114 }
7115
7116 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7117
34f80b04
EG
7118 rc = bnx2x_setup_leading(bp);
7119 if (rc) {
da5a662a 7120 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 7121 goto load_error3;
34f80b04 7122 }
a2fbb9ea 7123
34f80b04
EG
7124 if (CHIP_IS_E1H(bp))
7125 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 7126 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
34f80b04
EG
7127 bp->state = BNX2X_STATE_DISABLED;
7128 }
a2fbb9ea 7129
ca00392c 7130 if (bp->state == BNX2X_STATE_OPEN) {
34f80b04
EG
7131 for_each_nondefault_queue(bp, i) {
7132 rc = bnx2x_setup_multi(bp, i);
7133 if (rc)
2dfe0e1f 7134 goto load_error3;
34f80b04 7135 }
a2fbb9ea 7136
ca00392c
EG
7137 if (CHIP_IS_E1(bp))
7138 bnx2x_set_mac_addr_e1(bp, 1);
7139 else
7140 bnx2x_set_mac_addr_e1h(bp, 1);
7141 }
34f80b04
EG
7142
7143 if (bp->port.pmf)
b5bf9068 7144 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea
ET
7145
7146 /* Start fast path */
34f80b04
EG
7147 switch (load_mode) {
7148 case LOAD_NORMAL:
ca00392c
EG
7149 if (bp->state == BNX2X_STATE_OPEN) {
7150 /* Tx queue should be only reenabled */
7151 netif_tx_wake_all_queues(bp->dev);
7152 }
2dfe0e1f 7153 /* Initialize the receive filter. */
34f80b04
EG
7154 bnx2x_set_rx_mode(bp->dev);
7155 break;
7156
7157 case LOAD_OPEN:
555f6c78 7158 netif_tx_start_all_queues(bp->dev);
ca00392c
EG
7159 if (bp->state != BNX2X_STATE_OPEN)
7160 netif_tx_disable(bp->dev);
2dfe0e1f 7161 /* Initialize the receive filter. */
34f80b04 7162 bnx2x_set_rx_mode(bp->dev);
34f80b04 7163 break;
a2fbb9ea 7164
34f80b04 7165 case LOAD_DIAG:
2dfe0e1f 7166 /* Initialize the receive filter. */
a2fbb9ea 7167 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
7168 bp->state = BNX2X_STATE_DIAG;
7169 break;
7170
7171 default:
7172 break;
a2fbb9ea
ET
7173 }
7174
34f80b04
EG
7175 if (!bp->port.pmf)
7176 bnx2x__link_status_update(bp);
7177
a2fbb9ea
ET
7178 /* start the timer */
7179 mod_timer(&bp->timer, jiffies + bp->current_interval);
7180
34f80b04 7181
a2fbb9ea
ET
7182 return 0;
7183
2dfe0e1f
EG
7184load_error3:
7185 bnx2x_int_disable_sync(bp, 1);
7186 if (!BP_NOMCP(bp)) {
7187 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7188 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7189 }
7190 bp->port.pmf = 0;
7a9b2557
VZ
7191 /* Free SKBs, SGEs, TPA pool and driver internals */
7192 bnx2x_free_skbs(bp);
555f6c78 7193 for_each_rx_queue(bp, i)
3196a88a 7194 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 7195load_error2:
d1014634
YG
7196 /* Release IRQs */
7197 bnx2x_free_irq(bp);
2dfe0e1f
EG
7198load_error1:
7199 bnx2x_napi_disable(bp);
555f6c78 7200 for_each_rx_queue(bp, i)
7cde1c8b 7201 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7202 bnx2x_free_mem(bp);
7203
34f80b04 7204 return rc;
a2fbb9ea
ET
7205}
7206
7207static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7208{
555f6c78 7209 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
7210 int rc;
7211
c14423fe 7212 /* halt the connection */
555f6c78
EG
7213 fp->state = BNX2X_FP_STATE_HALTING;
7214 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7215
34f80b04 7216 /* Wait for completion */
a2fbb9ea 7217 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7218 &(fp->state), 1);
c14423fe 7219 if (rc) /* timeout */
a2fbb9ea
ET
7220 return rc;
7221
7222 /* delete cfc entry */
7223 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7224
34f80b04
EG
7225 /* Wait for completion */
7226 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7227 &(fp->state), 1);
34f80b04 7228 return rc;
a2fbb9ea
ET
7229}
7230
da5a662a 7231static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7232{
4781bfad 7233 __le16 dsb_sp_prod_idx;
c14423fe 7234 /* if the other port is handling traffic,
a2fbb9ea 7235 this can take a lot of time */
34f80b04
EG
7236 int cnt = 500;
7237 int rc;
a2fbb9ea
ET
7238
7239 might_sleep();
7240
7241 /* Send HALT ramrod */
7242 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7243 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7244
34f80b04
EG
7245 /* Wait for completion */
7246 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7247 &(bp->fp[0].state), 1);
7248 if (rc) /* timeout */
da5a662a 7249 return rc;
a2fbb9ea 7250
49d66772 7251 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7252
228241eb 7253 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
7254 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7255
49d66772 7256 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
7257 we are going to reset the chip anyway
7258 so there is not much to do if this times out
7259 */
34f80b04 7260 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
7261 if (!cnt) {
7262 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7263 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7264 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7265#ifdef BNX2X_STOP_ON_ERROR
7266 bnx2x_panic();
7267#endif
36e552ab 7268 rc = -EBUSY;
34f80b04
EG
7269 break;
7270 }
7271 cnt--;
da5a662a 7272 msleep(1);
5650d9d4 7273 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
7274 }
7275 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7276 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
7277
7278 return rc;
a2fbb9ea
ET
7279}
7280
34f80b04
EG
7281static void bnx2x_reset_func(struct bnx2x *bp)
7282{
7283 int port = BP_PORT(bp);
7284 int func = BP_FUNC(bp);
7285 int base, i;
7286
7287 /* Configure IGU */
7288 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7289 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7290
34f80b04
EG
7291 /* Clear ILT */
7292 base = FUNC_ILT_BASE(func);
7293 for (i = base; i < base + ILT_PER_FUNC; i++)
7294 bnx2x_ilt_wr(bp, i, 0);
7295}
7296
7297static void bnx2x_reset_port(struct bnx2x *bp)
7298{
7299 int port = BP_PORT(bp);
7300 u32 val;
7301
7302 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7303
7304 /* Do not rcv packets to BRB */
7305 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7306 /* Do not direct rcv packets that are not for MCP to the BRB */
7307 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7308 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7309
7310 /* Configure AEU */
7311 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7312
7313 msleep(100);
7314 /* Check for BRB port occupancy */
7315 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7316 if (val)
7317 DP(NETIF_MSG_IFDOWN,
33471629 7318 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
7319
7320 /* TODO: Close Doorbell port? */
7321}
7322
34f80b04
EG
7323static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7324{
7325 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7326 BP_FUNC(bp), reset_code);
7327
7328 switch (reset_code) {
7329 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7330 bnx2x_reset_port(bp);
7331 bnx2x_reset_func(bp);
7332 bnx2x_reset_common(bp);
7333 break;
7334
7335 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7336 bnx2x_reset_port(bp);
7337 bnx2x_reset_func(bp);
7338 break;
7339
7340 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7341 bnx2x_reset_func(bp);
7342 break;
49d66772 7343
34f80b04
EG
7344 default:
7345 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7346 break;
7347 }
7348}
7349
33471629 7350/* must be called with rtnl_lock */
34f80b04 7351static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7352{
da5a662a 7353 int port = BP_PORT(bp);
a2fbb9ea 7354 u32 reset_code = 0;
da5a662a 7355 int i, cnt, rc;
a2fbb9ea
ET
7356
7357 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7358
228241eb
ET
7359 bp->rx_mode = BNX2X_RX_MODE_NONE;
7360 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7361
f8ef6e44 7362 bnx2x_netif_stop(bp, 1);
e94d8af3 7363
34f80b04
EG
7364 del_timer_sync(&bp->timer);
7365 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7366 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7367 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7368
70b9986c
EG
7369 /* Release IRQs */
7370 bnx2x_free_irq(bp);
7371
555f6c78
EG
7372 /* Wait until tx fastpath tasks complete */
7373 for_each_tx_queue(bp, i) {
228241eb
ET
7374 struct bnx2x_fastpath *fp = &bp->fp[i];
7375
34f80b04 7376 cnt = 1000;
e8b5fc51 7377 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7378
7961f791 7379 bnx2x_tx_int(fp);
34f80b04
EG
7380 if (!cnt) {
7381 BNX2X_ERR("timeout waiting for queue[%d]\n",
7382 i);
7383#ifdef BNX2X_STOP_ON_ERROR
7384 bnx2x_panic();
7385 return -EBUSY;
7386#else
7387 break;
7388#endif
7389 }
7390 cnt--;
da5a662a 7391 msleep(1);
34f80b04 7392 }
228241eb 7393 }
da5a662a
VZ
7394 /* Give HW time to discard old tx messages */
7395 msleep(1);
a2fbb9ea 7396
3101c2bc
YG
7397 if (CHIP_IS_E1(bp)) {
7398 struct mac_configuration_cmd *config =
7399 bnx2x_sp(bp, mcast_config);
7400
7401 bnx2x_set_mac_addr_e1(bp, 0);
7402
8d9c5f34 7403 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
7404 CAM_INVALIDATE(config->config_table[i]);
7405
8d9c5f34 7406 config->hdr.length = i;
3101c2bc
YG
7407 if (CHIP_REV_IS_SLOW(bp))
7408 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7409 else
7410 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 7411 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
7412 config->hdr.reserved1 = 0;
7413
7414 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7415 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7416 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7417
7418 } else { /* E1H */
65abd74d
YG
7419 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7420
3101c2bc
YG
7421 bnx2x_set_mac_addr_e1h(bp, 0);
7422
7423 for (i = 0; i < MC_HASH_SIZE; i++)
7424 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
7425
7426 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc
YG
7427 }
7428
65abd74d
YG
7429 if (unload_mode == UNLOAD_NORMAL)
7430 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7431
7d0446c2 7432 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7433 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7434
7d0446c2 7435 else if (bp->wol) {
65abd74d
YG
7436 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7437 u8 *mac_addr = bp->dev->dev_addr;
7438 u32 val;
7439 /* The mac address is written to entries 1-4 to
7440 preserve entry 0 which is used by the PMF */
7441 u8 entry = (BP_E1HVN(bp) + 1)*8;
7442
7443 val = (mac_addr[0] << 8) | mac_addr[1];
7444 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7445
7446 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7447 (mac_addr[4] << 8) | mac_addr[5];
7448 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7449
7450 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7451
7452 } else
7453 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7454
34f80b04
EG
7455 /* Close multi and leading connections
7456 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
7457 for_each_nondefault_queue(bp, i)
7458 if (bnx2x_stop_multi(bp, i))
228241eb 7459 goto unload_error;
a2fbb9ea 7460
da5a662a
VZ
7461 rc = bnx2x_stop_leading(bp);
7462 if (rc) {
34f80b04 7463 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7464#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7465 return -EBUSY;
da5a662a
VZ
7466#else
7467 goto unload_error;
34f80b04 7468#endif
228241eb
ET
7469 }
7470
7471unload_error:
34f80b04 7472 if (!BP_NOMCP(bp))
228241eb 7473 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 7474 else {
f5372251 7475 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
7476 load_count[0], load_count[1], load_count[2]);
7477 load_count[0]--;
da5a662a 7478 load_count[1 + port]--;
f5372251 7479 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
7480 load_count[0], load_count[1], load_count[2]);
7481 if (load_count[0] == 0)
7482 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7483 else if (load_count[1 + port] == 0)
34f80b04
EG
7484 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7485 else
7486 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7487 }
a2fbb9ea 7488
34f80b04
EG
7489 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7490 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7491 bnx2x__link_reset(bp);
a2fbb9ea
ET
7492
7493 /* Reset the chip */
228241eb 7494 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7495
7496 /* Report UNLOAD_DONE to MCP */
34f80b04 7497 if (!BP_NOMCP(bp))
a2fbb9ea 7498 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 7499
9a035440 7500 bp->port.pmf = 0;
a2fbb9ea 7501
7a9b2557 7502 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7503 bnx2x_free_skbs(bp);
555f6c78 7504 for_each_rx_queue(bp, i)
3196a88a 7505 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7506 for_each_rx_queue(bp, i)
7cde1c8b 7507 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7508 bnx2x_free_mem(bp);
7509
7510 bp->state = BNX2X_STATE_CLOSED;
228241eb 7511
a2fbb9ea
ET
7512 netif_carrier_off(bp->dev);
7513
7514 return 0;
7515}
7516
34f80b04
EG
7517static void bnx2x_reset_task(struct work_struct *work)
7518{
7519 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7520
7521#ifdef BNX2X_STOP_ON_ERROR
7522 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7523 " so reset not done to allow debug dump,\n"
ad361c98 7524 " you will need to reboot when done\n");
34f80b04
EG
7525 return;
7526#endif
7527
7528 rtnl_lock();
7529
7530 if (!netif_running(bp->dev))
7531 goto reset_task_exit;
7532
7533 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7534 bnx2x_nic_load(bp, LOAD_NORMAL);
7535
7536reset_task_exit:
7537 rtnl_unlock();
7538}
7539
a2fbb9ea
ET
7540/* end of nic load/unload */
7541
7542/* ethtool_ops */
7543
7544/*
7545 * Init service functions
7546 */
7547
f1ef27ef
EG
7548static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7549{
7550 switch (func) {
7551 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7552 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7553 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7554 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7555 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7556 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7557 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7558 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7559 default:
7560 BNX2X_ERR("Unsupported function index: %d\n", func);
7561 return (u32)(-1);
7562 }
7563}
7564
7565static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7566{
7567 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7568
7569 /* Flush all outstanding writes */
7570 mmiowb();
7571
7572 /* Pretend to be function 0 */
7573 REG_WR(bp, reg, 0);
7574 /* Flush the GRC transaction (in the chip) */
7575 new_val = REG_RD(bp, reg);
7576 if (new_val != 0) {
7577 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7578 new_val);
7579 BUG();
7580 }
7581
7582 /* From now we are in the "like-E1" mode */
7583 bnx2x_int_disable(bp);
7584
7585 /* Flush all outstanding writes */
7586 mmiowb();
7587
7588 /* Restore the original funtion settings */
7589 REG_WR(bp, reg, orig_func);
7590 new_val = REG_RD(bp, reg);
7591 if (new_val != orig_func) {
7592 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7593 orig_func, new_val);
7594 BUG();
7595 }
7596}
7597
7598static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7599{
7600 if (CHIP_IS_E1H(bp))
7601 bnx2x_undi_int_disable_e1h(bp, func);
7602 else
7603 bnx2x_int_disable(bp);
7604}
7605
34f80b04
EG
7606static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7607{
7608 u32 val;
7609
7610 /* Check if there is any driver already loaded */
7611 val = REG_RD(bp, MISC_REG_UNPREPARED);
7612 if (val == 0x1) {
7613 /* Check if it is the UNDI driver
7614 * UNDI driver initializes CID offset for normal bell to 0x7
7615 */
4a37fb66 7616 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7617 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7618 if (val == 0x7) {
7619 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7620 /* save our func */
34f80b04 7621 int func = BP_FUNC(bp);
da5a662a
VZ
7622 u32 swap_en;
7623 u32 swap_val;
34f80b04 7624
b4661739
EG
7625 /* clear the UNDI indication */
7626 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7627
34f80b04
EG
7628 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7629
7630 /* try unload UNDI on port 0 */
7631 bp->func = 0;
da5a662a
VZ
7632 bp->fw_seq =
7633 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7634 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7635 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7636
7637 /* if UNDI is loaded on the other port */
7638 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7639
da5a662a
VZ
7640 /* send "DONE" for previous unload */
7641 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7642
7643 /* unload UNDI on port 1 */
34f80b04 7644 bp->func = 1;
da5a662a
VZ
7645 bp->fw_seq =
7646 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7647 DRV_MSG_SEQ_NUMBER_MASK);
7648 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7649
7650 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7651 }
7652
b4661739
EG
7653 /* now it's safe to release the lock */
7654 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7655
f1ef27ef 7656 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
7657
7658 /* close input traffic and wait for it */
7659 /* Do not rcv packets to BRB */
7660 REG_WR(bp,
7661 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7662 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7663 /* Do not direct rcv packets that are not for MCP to
7664 * the BRB */
7665 REG_WR(bp,
7666 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7667 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7668 /* clear AEU */
7669 REG_WR(bp,
7670 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7671 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7672 msleep(10);
7673
7674 /* save NIG port swap info */
7675 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7676 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7677 /* reset device */
7678 REG_WR(bp,
7679 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7680 0xd3ffffff);
34f80b04
EG
7681 REG_WR(bp,
7682 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7683 0x1403);
da5a662a
VZ
7684 /* take the NIG out of reset and restore swap values */
7685 REG_WR(bp,
7686 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7687 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7688 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7689 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7690
7691 /* send unload done to the MCP */
7692 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7693
7694 /* restore our func and fw_seq */
7695 bp->func = func;
7696 bp->fw_seq =
7697 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7698 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7699
7700 } else
7701 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7702 }
7703}
7704
7705static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7706{
7707 u32 val, val2, val3, val4, id;
72ce58c3 7708 u16 pmc;
34f80b04
EG
7709
7710 /* Get the chip revision id and number. */
7711 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7712 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7713 id = ((val & 0xffff) << 16);
7714 val = REG_RD(bp, MISC_REG_CHIP_REV);
7715 id |= ((val & 0xf) << 12);
7716 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7717 id |= ((val & 0xff) << 4);
5a40e08e 7718 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7719 id |= (val & 0xf);
7720 bp->common.chip_id = id;
7721 bp->link_params.chip_id = bp->common.chip_id;
7722 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7723
1c06328c
EG
7724 val = (REG_RD(bp, 0x2874) & 0x55);
7725 if ((bp->common.chip_id & 0x1) ||
7726 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7727 bp->flags |= ONE_PORT_FLAG;
7728 BNX2X_DEV_INFO("single port device\n");
7729 }
7730
34f80b04
EG
7731 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7732 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7733 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7734 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7735 bp->common.flash_size, bp->common.flash_size);
7736
7737 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7738 bp->link_params.shmem_base = bp->common.shmem_base;
7739 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7740
7741 if (!bp->common.shmem_base ||
7742 (bp->common.shmem_base < 0xA0000) ||
7743 (bp->common.shmem_base >= 0xC0000)) {
7744 BNX2X_DEV_INFO("MCP not active\n");
7745 bp->flags |= NO_MCP_FLAG;
7746 return;
7747 }
7748
7749 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7750 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7751 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7752 BNX2X_ERR("BAD MCP validity signature\n");
7753
7754 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7755 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
7756
7757 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7758 SHARED_HW_CFG_LED_MODE_MASK) >>
7759 SHARED_HW_CFG_LED_MODE_SHIFT);
7760
c2c8b03e
EG
7761 bp->link_params.feature_config_flags = 0;
7762 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7763 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7764 bp->link_params.feature_config_flags |=
7765 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7766 else
7767 bp->link_params.feature_config_flags &=
7768 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7769
34f80b04
EG
7770 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7771 bp->common.bc_ver = val;
7772 BNX2X_DEV_INFO("bc_ver %X\n", val);
7773 if (val < BNX2X_BC_VER) {
7774 /* for now only warn
7775 * later we might need to enforce this */
7776 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7777 " please upgrade BC\n", BNX2X_BC_VER, val);
7778 }
4d295db0
EG
7779 bp->link_params.feature_config_flags |=
7780 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
7781 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
7782
7783 if (BP_E1HVN(bp) == 0) {
7784 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7785 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7786 } else {
7787 /* no WOL capability for E1HVN != 0 */
7788 bp->flags |= NO_WOL_FLAG;
7789 }
7790 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 7791 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
7792
7793 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7794 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7795 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7796 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7797
7798 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7799 val, val2, val3, val4);
7800}
7801
7802static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7803 u32 switch_cfg)
a2fbb9ea 7804{
34f80b04 7805 int port = BP_PORT(bp);
a2fbb9ea
ET
7806 u32 ext_phy_type;
7807
a2fbb9ea
ET
7808 switch (switch_cfg) {
7809 case SWITCH_CFG_1G:
7810 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7811
c18487ee
YR
7812 ext_phy_type =
7813 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7814 switch (ext_phy_type) {
7815 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7816 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7817 ext_phy_type);
7818
34f80b04
EG
7819 bp->port.supported |= (SUPPORTED_10baseT_Half |
7820 SUPPORTED_10baseT_Full |
7821 SUPPORTED_100baseT_Half |
7822 SUPPORTED_100baseT_Full |
7823 SUPPORTED_1000baseT_Full |
7824 SUPPORTED_2500baseX_Full |
7825 SUPPORTED_TP |
7826 SUPPORTED_FIBRE |
7827 SUPPORTED_Autoneg |
7828 SUPPORTED_Pause |
7829 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7830 break;
7831
7832 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7833 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7834 ext_phy_type);
7835
34f80b04
EG
7836 bp->port.supported |= (SUPPORTED_10baseT_Half |
7837 SUPPORTED_10baseT_Full |
7838 SUPPORTED_100baseT_Half |
7839 SUPPORTED_100baseT_Full |
7840 SUPPORTED_1000baseT_Full |
7841 SUPPORTED_TP |
7842 SUPPORTED_FIBRE |
7843 SUPPORTED_Autoneg |
7844 SUPPORTED_Pause |
7845 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7846 break;
7847
7848 default:
7849 BNX2X_ERR("NVRAM config error. "
7850 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7851 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7852 return;
7853 }
7854
34f80b04
EG
7855 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7856 port*0x10);
7857 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7858 break;
7859
7860 case SWITCH_CFG_10G:
7861 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7862
c18487ee
YR
7863 ext_phy_type =
7864 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7865 switch (ext_phy_type) {
7866 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7867 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7868 ext_phy_type);
7869
34f80b04
EG
7870 bp->port.supported |= (SUPPORTED_10baseT_Half |
7871 SUPPORTED_10baseT_Full |
7872 SUPPORTED_100baseT_Half |
7873 SUPPORTED_100baseT_Full |
7874 SUPPORTED_1000baseT_Full |
7875 SUPPORTED_2500baseX_Full |
7876 SUPPORTED_10000baseT_Full |
7877 SUPPORTED_TP |
7878 SUPPORTED_FIBRE |
7879 SUPPORTED_Autoneg |
7880 SUPPORTED_Pause |
7881 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7882 break;
7883
589abe3a
EG
7884 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7885 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 7886 ext_phy_type);
f1410647 7887
34f80b04 7888 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 7889 SUPPORTED_1000baseT_Full |
34f80b04 7890 SUPPORTED_FIBRE |
589abe3a 7891 SUPPORTED_Autoneg |
34f80b04
EG
7892 SUPPORTED_Pause |
7893 SUPPORTED_Asym_Pause);
f1410647
ET
7894 break;
7895
589abe3a
EG
7896 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7897 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
7898 ext_phy_type);
7899
34f80b04 7900 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 7901 SUPPORTED_2500baseX_Full |
34f80b04 7902 SUPPORTED_1000baseT_Full |
589abe3a
EG
7903 SUPPORTED_FIBRE |
7904 SUPPORTED_Autoneg |
7905 SUPPORTED_Pause |
7906 SUPPORTED_Asym_Pause);
7907 break;
7908
7909 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7910 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7911 ext_phy_type);
7912
7913 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
7914 SUPPORTED_FIBRE |
7915 SUPPORTED_Pause |
7916 SUPPORTED_Asym_Pause);
f1410647
ET
7917 break;
7918
589abe3a
EG
7919 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7920 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
7921 ext_phy_type);
7922
34f80b04
EG
7923 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7924 SUPPORTED_1000baseT_Full |
7925 SUPPORTED_FIBRE |
34f80b04
EG
7926 SUPPORTED_Pause |
7927 SUPPORTED_Asym_Pause);
f1410647
ET
7928 break;
7929
589abe3a
EG
7930 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7931 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
7932 ext_phy_type);
7933
34f80b04 7934 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 7935 SUPPORTED_1000baseT_Full |
34f80b04 7936 SUPPORTED_Autoneg |
589abe3a 7937 SUPPORTED_FIBRE |
34f80b04
EG
7938 SUPPORTED_Pause |
7939 SUPPORTED_Asym_Pause);
c18487ee
YR
7940 break;
7941
4d295db0
EG
7942 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
7943 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
7944 ext_phy_type);
7945
7946 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7947 SUPPORTED_1000baseT_Full |
7948 SUPPORTED_Autoneg |
7949 SUPPORTED_FIBRE |
7950 SUPPORTED_Pause |
7951 SUPPORTED_Asym_Pause);
7952 break;
7953
f1410647
ET
7954 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7955 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7956 ext_phy_type);
7957
34f80b04
EG
7958 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7959 SUPPORTED_TP |
7960 SUPPORTED_Autoneg |
7961 SUPPORTED_Pause |
7962 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7963 break;
7964
28577185
EG
7965 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7966 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7967 ext_phy_type);
7968
7969 bp->port.supported |= (SUPPORTED_10baseT_Half |
7970 SUPPORTED_10baseT_Full |
7971 SUPPORTED_100baseT_Half |
7972 SUPPORTED_100baseT_Full |
7973 SUPPORTED_1000baseT_Full |
7974 SUPPORTED_10000baseT_Full |
7975 SUPPORTED_TP |
7976 SUPPORTED_Autoneg |
7977 SUPPORTED_Pause |
7978 SUPPORTED_Asym_Pause);
7979 break;
7980
c18487ee
YR
7981 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7982 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7983 bp->link_params.ext_phy_config);
7984 break;
7985
a2fbb9ea
ET
7986 default:
7987 BNX2X_ERR("NVRAM config error. "
7988 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7989 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7990 return;
7991 }
7992
34f80b04
EG
7993 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7994 port*0x18);
7995 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7996
a2fbb9ea
ET
7997 break;
7998
7999 default:
8000 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 8001 bp->port.link_config);
a2fbb9ea
ET
8002 return;
8003 }
34f80b04 8004 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
8005
8006 /* mask what we support according to speed_cap_mask */
c18487ee
YR
8007 if (!(bp->link_params.speed_cap_mask &
8008 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 8009 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8010
c18487ee
YR
8011 if (!(bp->link_params.speed_cap_mask &
8012 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 8013 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8014
c18487ee
YR
8015 if (!(bp->link_params.speed_cap_mask &
8016 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 8017 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8018
c18487ee
YR
8019 if (!(bp->link_params.speed_cap_mask &
8020 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 8021 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8022
c18487ee
YR
8023 if (!(bp->link_params.speed_cap_mask &
8024 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
8025 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8026 SUPPORTED_1000baseT_Full);
a2fbb9ea 8027
c18487ee
YR
8028 if (!(bp->link_params.speed_cap_mask &
8029 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 8030 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8031
c18487ee
YR
8032 if (!(bp->link_params.speed_cap_mask &
8033 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 8034 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 8035
34f80b04 8036 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
8037}
8038
34f80b04 8039static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8040{
c18487ee 8041 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 8042
34f80b04 8043 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 8044 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 8045 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 8046 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8047 bp->port.advertising = bp->port.supported;
a2fbb9ea 8048 } else {
c18487ee
YR
8049 u32 ext_phy_type =
8050 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8051
8052 if ((ext_phy_type ==
8053 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8054 (ext_phy_type ==
8055 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 8056 /* force 10G, no AN */
c18487ee 8057 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 8058 bp->port.advertising =
a2fbb9ea
ET
8059 (ADVERTISED_10000baseT_Full |
8060 ADVERTISED_FIBRE);
8061 break;
8062 }
8063 BNX2X_ERR("NVRAM config error. "
8064 "Invalid link_config 0x%x"
8065 " Autoneg not supported\n",
34f80b04 8066 bp->port.link_config);
a2fbb9ea
ET
8067 return;
8068 }
8069 break;
8070
8071 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 8072 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 8073 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
8074 bp->port.advertising = (ADVERTISED_10baseT_Full |
8075 ADVERTISED_TP);
a2fbb9ea
ET
8076 } else {
8077 BNX2X_ERR("NVRAM config error. "
8078 "Invalid link_config 0x%x"
8079 " speed_cap_mask 0x%x\n",
34f80b04 8080 bp->port.link_config,
c18487ee 8081 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8082 return;
8083 }
8084 break;
8085
8086 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 8087 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
8088 bp->link_params.req_line_speed = SPEED_10;
8089 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8090 bp->port.advertising = (ADVERTISED_10baseT_Half |
8091 ADVERTISED_TP);
a2fbb9ea
ET
8092 } else {
8093 BNX2X_ERR("NVRAM config error. "
8094 "Invalid link_config 0x%x"
8095 " speed_cap_mask 0x%x\n",
34f80b04 8096 bp->port.link_config,
c18487ee 8097 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8098 return;
8099 }
8100 break;
8101
8102 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 8103 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 8104 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
8105 bp->port.advertising = (ADVERTISED_100baseT_Full |
8106 ADVERTISED_TP);
a2fbb9ea
ET
8107 } else {
8108 BNX2X_ERR("NVRAM config error. "
8109 "Invalid link_config 0x%x"
8110 " speed_cap_mask 0x%x\n",
34f80b04 8111 bp->port.link_config,
c18487ee 8112 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8113 return;
8114 }
8115 break;
8116
8117 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 8118 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
8119 bp->link_params.req_line_speed = SPEED_100;
8120 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8121 bp->port.advertising = (ADVERTISED_100baseT_Half |
8122 ADVERTISED_TP);
a2fbb9ea
ET
8123 } else {
8124 BNX2X_ERR("NVRAM config error. "
8125 "Invalid link_config 0x%x"
8126 " speed_cap_mask 0x%x\n",
34f80b04 8127 bp->port.link_config,
c18487ee 8128 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8129 return;
8130 }
8131 break;
8132
8133 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 8134 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 8135 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
8136 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8137 ADVERTISED_TP);
a2fbb9ea
ET
8138 } else {
8139 BNX2X_ERR("NVRAM config error. "
8140 "Invalid link_config 0x%x"
8141 " speed_cap_mask 0x%x\n",
34f80b04 8142 bp->port.link_config,
c18487ee 8143 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8144 return;
8145 }
8146 break;
8147
8148 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8149 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8150 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
8151 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8152 ADVERTISED_TP);
a2fbb9ea
ET
8153 } else {
8154 BNX2X_ERR("NVRAM config error. "
8155 "Invalid link_config 0x%x"
8156 " speed_cap_mask 0x%x\n",
34f80b04 8157 bp->port.link_config,
c18487ee 8158 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8159 return;
8160 }
8161 break;
8162
8163 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8164 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8165 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8166 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8167 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
8168 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8169 ADVERTISED_FIBRE);
a2fbb9ea
ET
8170 } else {
8171 BNX2X_ERR("NVRAM config error. "
8172 "Invalid link_config 0x%x"
8173 " speed_cap_mask 0x%x\n",
34f80b04 8174 bp->port.link_config,
c18487ee 8175 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8176 return;
8177 }
8178 break;
8179
8180 default:
8181 BNX2X_ERR("NVRAM config error. "
8182 "BAD link speed link_config 0x%x\n",
34f80b04 8183 bp->port.link_config);
c18487ee 8184 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8185 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
8186 break;
8187 }
a2fbb9ea 8188
34f80b04
EG
8189 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8190 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8191 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8192 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8193 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8194
c18487ee 8195 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8196 " advertising 0x%x\n",
c18487ee
YR
8197 bp->link_params.req_line_speed,
8198 bp->link_params.req_duplex,
34f80b04 8199 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
8200}
8201
34f80b04 8202static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8203{
34f80b04
EG
8204 int port = BP_PORT(bp);
8205 u32 val, val2;
589abe3a 8206 u32 config;
c2c8b03e 8207 u16 i;
a2fbb9ea 8208
c18487ee 8209 bp->link_params.bp = bp;
34f80b04 8210 bp->link_params.port = port;
c18487ee 8211
c18487ee 8212 bp->link_params.lane_config =
a2fbb9ea 8213 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8214 bp->link_params.ext_phy_config =
a2fbb9ea
ET
8215 SHMEM_RD(bp,
8216 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
8217 /* BCM8727_NOC => BCM8727 no over current */
8218 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8219 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8220 bp->link_params.ext_phy_config &=
8221 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8222 bp->link_params.ext_phy_config |=
8223 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8224 bp->link_params.feature_config_flags |=
8225 FEATURE_CONFIG_BCM8727_NOC;
8226 }
8227
c18487ee 8228 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
8229 SHMEM_RD(bp,
8230 dev_info.port_hw_config[port].speed_capability_mask);
8231
34f80b04 8232 bp->port.link_config =
a2fbb9ea
ET
8233 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8234
c2c8b03e
EG
8235 /* Get the 4 lanes xgxs config rx and tx */
8236 for (i = 0; i < 2; i++) {
8237 val = SHMEM_RD(bp,
8238 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8239 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8240 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8241
8242 val = SHMEM_RD(bp,
8243 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8244 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8245 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8246 }
8247
3ce2c3f9
EG
8248 /* If the device is capable of WoL, set the default state according
8249 * to the HW
8250 */
4d295db0 8251 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8252 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8253 (config & PORT_FEATURE_WOL_ENABLED));
8254
c2c8b03e
EG
8255 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8256 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
8257 bp->link_params.lane_config,
8258 bp->link_params.ext_phy_config,
34f80b04 8259 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8260
4d295db0
EG
8261 bp->link_params.switch_cfg |= (bp->port.link_config &
8262 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8263 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8264
8265 bnx2x_link_settings_requested(bp);
8266
8267 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8268 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8269 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8270 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8271 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8272 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8273 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8274 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
8275 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8276 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
8277}
8278
8279static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8280{
8281 int func = BP_FUNC(bp);
8282 u32 val, val2;
8283 int rc = 0;
a2fbb9ea 8284
34f80b04 8285 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8286
34f80b04
EG
8287 bp->e1hov = 0;
8288 bp->e1hmf = 0;
8289 if (CHIP_IS_E1H(bp)) {
8290 bp->mf_config =
8291 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8292
3196a88a
EG
8293 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8294 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 8295 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 8296
34f80b04
EG
8297 bp->e1hov = val;
8298 bp->e1hmf = 1;
8299 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8300 "(0x%04x)\n",
8301 func, bp->e1hov, bp->e1hov);
8302 } else {
f5372251 8303 BNX2X_DEV_INFO("single function mode\n");
34f80b04
EG
8304 if (BP_E1HVN(bp)) {
8305 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8306 " aborting\n", func);
8307 rc = -EPERM;
8308 }
8309 }
8310 }
a2fbb9ea 8311
34f80b04
EG
8312 if (!BP_NOMCP(bp)) {
8313 bnx2x_get_port_hwinfo(bp);
8314
8315 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8316 DRV_MSG_SEQ_NUMBER_MASK);
8317 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8318 }
8319
8320 if (IS_E1HMF(bp)) {
8321 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8322 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8323 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8324 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8325 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8326 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8327 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8328 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8329 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8330 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8331 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8332 ETH_ALEN);
8333 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8334 ETH_ALEN);
a2fbb9ea 8335 }
34f80b04
EG
8336
8337 return rc;
a2fbb9ea
ET
8338 }
8339
34f80b04
EG
8340 if (BP_NOMCP(bp)) {
8341 /* only supposed to happen on emulation/FPGA */
33471629 8342 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
8343 random_ether_addr(bp->dev->dev_addr);
8344 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8345 }
a2fbb9ea 8346
34f80b04
EG
8347 return rc;
8348}
8349
8350static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8351{
8352 int func = BP_FUNC(bp);
87942b46 8353 int timer_interval;
34f80b04
EG
8354 int rc;
8355
da5a662a
VZ
8356 /* Disable interrupt handling until HW is initialized */
8357 atomic_set(&bp->intr_sem, 1);
e1510706 8358 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8359
34f80b04 8360 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 8361
1cf167f2 8362 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
8363 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8364
8365 rc = bnx2x_get_hwinfo(bp);
8366
8367 /* need to reset chip if undi was active */
8368 if (!BP_NOMCP(bp))
8369 bnx2x_undi_unload(bp);
8370
8371 if (CHIP_REV_IS_FPGA(bp))
8372 printk(KERN_ERR PFX "FPGA detected\n");
8373
8374 if (BP_NOMCP(bp) && (func == 0))
8375 printk(KERN_ERR PFX
8376 "MCP disabled, must load devices in order!\n");
8377
555f6c78 8378 /* Set multi queue mode */
8badd27a
EG
8379 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8380 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8381 printk(KERN_ERR PFX
8badd27a 8382 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
8383 multi_mode = ETH_RSS_MODE_DISABLED;
8384 }
8385 bp->multi_mode = multi_mode;
8386
8387
7a9b2557
VZ
8388 /* Set TPA flags */
8389 if (disable_tpa) {
8390 bp->flags &= ~TPA_ENABLE_FLAG;
8391 bp->dev->features &= ~NETIF_F_LRO;
8392 } else {
8393 bp->flags |= TPA_ENABLE_FLAG;
8394 bp->dev->features |= NETIF_F_LRO;
8395 }
8396
8d5726c4 8397 bp->mrrs = mrrs;
7a9b2557 8398
34f80b04
EG
8399 bp->tx_ring_size = MAX_TX_AVAIL;
8400 bp->rx_ring_size = MAX_RX_AVAIL;
8401
8402 bp->rx_csum = 1;
34f80b04
EG
8403
8404 bp->tx_ticks = 50;
8405 bp->rx_ticks = 25;
8406
87942b46
EG
8407 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8408 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8409
8410 init_timer(&bp->timer);
8411 bp->timer.expires = jiffies + bp->current_interval;
8412 bp->timer.data = (unsigned long) bp;
8413 bp->timer.function = bnx2x_timer;
8414
8415 return rc;
a2fbb9ea
ET
8416}
8417
8418/*
8419 * ethtool service functions
8420 */
8421
8422/* All ethtool functions called with rtnl_lock */
8423
8424static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8425{
8426 struct bnx2x *bp = netdev_priv(dev);
8427
34f80b04
EG
8428 cmd->supported = bp->port.supported;
8429 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
8430
8431 if (netif_carrier_ok(dev)) {
c18487ee
YR
8432 cmd->speed = bp->link_vars.line_speed;
8433 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8434 } else {
c18487ee
YR
8435 cmd->speed = bp->link_params.req_line_speed;
8436 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8437 }
34f80b04
EG
8438 if (IS_E1HMF(bp)) {
8439 u16 vn_max_rate;
8440
8441 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8442 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8443 if (vn_max_rate < cmd->speed)
8444 cmd->speed = vn_max_rate;
8445 }
a2fbb9ea 8446
c18487ee
YR
8447 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8448 u32 ext_phy_type =
8449 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
8450
8451 switch (ext_phy_type) {
8452 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 8453 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8454 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
8455 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8456 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8457 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 8458 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
8459 cmd->port = PORT_FIBRE;
8460 break;
8461
8462 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 8463 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
8464 cmd->port = PORT_TP;
8465 break;
8466
c18487ee
YR
8467 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8468 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8469 bp->link_params.ext_phy_config);
8470 break;
8471
f1410647
ET
8472 default:
8473 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
8474 bp->link_params.ext_phy_config);
8475 break;
f1410647
ET
8476 }
8477 } else
a2fbb9ea 8478 cmd->port = PORT_TP;
a2fbb9ea 8479
34f80b04 8480 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
8481 cmd->transceiver = XCVR_INTERNAL;
8482
c18487ee 8483 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8484 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8485 else
a2fbb9ea 8486 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
8487
8488 cmd->maxtxpkt = 0;
8489 cmd->maxrxpkt = 0;
8490
8491 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8492 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8493 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8494 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8495 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8496 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8497 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8498
8499 return 0;
8500}
8501
8502static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8503{
8504 struct bnx2x *bp = netdev_priv(dev);
8505 u32 advertising;
8506
34f80b04
EG
8507 if (IS_E1HMF(bp))
8508 return 0;
8509
a2fbb9ea
ET
8510 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8511 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8512 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8513 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8514 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8515 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8516 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8517
a2fbb9ea 8518 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8519 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8520 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8521 return -EINVAL;
f1410647 8522 }
a2fbb9ea
ET
8523
8524 /* advertise the requested speed and duplex if supported */
34f80b04 8525 cmd->advertising &= bp->port.supported;
a2fbb9ea 8526
c18487ee
YR
8527 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8528 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8529 bp->port.advertising |= (ADVERTISED_Autoneg |
8530 cmd->advertising);
a2fbb9ea
ET
8531
8532 } else { /* forced speed */
8533 /* advertise the requested speed and duplex if supported */
8534 switch (cmd->speed) {
8535 case SPEED_10:
8536 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8537 if (!(bp->port.supported &
f1410647
ET
8538 SUPPORTED_10baseT_Full)) {
8539 DP(NETIF_MSG_LINK,
8540 "10M full not supported\n");
a2fbb9ea 8541 return -EINVAL;
f1410647 8542 }
a2fbb9ea
ET
8543
8544 advertising = (ADVERTISED_10baseT_Full |
8545 ADVERTISED_TP);
8546 } else {
34f80b04 8547 if (!(bp->port.supported &
f1410647
ET
8548 SUPPORTED_10baseT_Half)) {
8549 DP(NETIF_MSG_LINK,
8550 "10M half not supported\n");
a2fbb9ea 8551 return -EINVAL;
f1410647 8552 }
a2fbb9ea
ET
8553
8554 advertising = (ADVERTISED_10baseT_Half |
8555 ADVERTISED_TP);
8556 }
8557 break;
8558
8559 case SPEED_100:
8560 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8561 if (!(bp->port.supported &
f1410647
ET
8562 SUPPORTED_100baseT_Full)) {
8563 DP(NETIF_MSG_LINK,
8564 "100M full not supported\n");
a2fbb9ea 8565 return -EINVAL;
f1410647 8566 }
a2fbb9ea
ET
8567
8568 advertising = (ADVERTISED_100baseT_Full |
8569 ADVERTISED_TP);
8570 } else {
34f80b04 8571 if (!(bp->port.supported &
f1410647
ET
8572 SUPPORTED_100baseT_Half)) {
8573 DP(NETIF_MSG_LINK,
8574 "100M half not supported\n");
a2fbb9ea 8575 return -EINVAL;
f1410647 8576 }
a2fbb9ea
ET
8577
8578 advertising = (ADVERTISED_100baseT_Half |
8579 ADVERTISED_TP);
8580 }
8581 break;
8582
8583 case SPEED_1000:
f1410647
ET
8584 if (cmd->duplex != DUPLEX_FULL) {
8585 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8586 return -EINVAL;
f1410647 8587 }
a2fbb9ea 8588
34f80b04 8589 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8590 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8591 return -EINVAL;
f1410647 8592 }
a2fbb9ea
ET
8593
8594 advertising = (ADVERTISED_1000baseT_Full |
8595 ADVERTISED_TP);
8596 break;
8597
8598 case SPEED_2500:
f1410647
ET
8599 if (cmd->duplex != DUPLEX_FULL) {
8600 DP(NETIF_MSG_LINK,
8601 "2.5G half not supported\n");
a2fbb9ea 8602 return -EINVAL;
f1410647 8603 }
a2fbb9ea 8604
34f80b04 8605 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8606 DP(NETIF_MSG_LINK,
8607 "2.5G full not supported\n");
a2fbb9ea 8608 return -EINVAL;
f1410647 8609 }
a2fbb9ea 8610
f1410647 8611 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8612 ADVERTISED_TP);
8613 break;
8614
8615 case SPEED_10000:
f1410647
ET
8616 if (cmd->duplex != DUPLEX_FULL) {
8617 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8618 return -EINVAL;
f1410647 8619 }
a2fbb9ea 8620
34f80b04 8621 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8622 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8623 return -EINVAL;
f1410647 8624 }
a2fbb9ea
ET
8625
8626 advertising = (ADVERTISED_10000baseT_Full |
8627 ADVERTISED_FIBRE);
8628 break;
8629
8630 default:
f1410647 8631 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8632 return -EINVAL;
8633 }
8634
c18487ee
YR
8635 bp->link_params.req_line_speed = cmd->speed;
8636 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8637 bp->port.advertising = advertising;
a2fbb9ea
ET
8638 }
8639
c18487ee 8640 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8641 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8642 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8643 bp->port.advertising);
a2fbb9ea 8644
34f80b04 8645 if (netif_running(dev)) {
bb2a0f7a 8646 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8647 bnx2x_link_set(bp);
8648 }
a2fbb9ea
ET
8649
8650 return 0;
8651}
8652
c18487ee
YR
8653#define PHY_FW_VER_LEN 10
8654
a2fbb9ea
ET
8655static void bnx2x_get_drvinfo(struct net_device *dev,
8656 struct ethtool_drvinfo *info)
8657{
8658 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8659 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8660
8661 strcpy(info->driver, DRV_MODULE_NAME);
8662 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8663
8664 phy_fw_ver[0] = '\0';
34f80b04 8665 if (bp->port.pmf) {
4a37fb66 8666 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8667 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8668 (bp->state != BNX2X_STATE_CLOSED),
8669 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8670 bnx2x_release_phy_lock(bp);
34f80b04 8671 }
c18487ee 8672
f0e53a84
EG
8673 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8674 (bp->common.bc_ver & 0xff0000) >> 16,
8675 (bp->common.bc_ver & 0xff00) >> 8,
8676 (bp->common.bc_ver & 0xff),
8677 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8678 strcpy(info->bus_info, pci_name(bp->pdev));
8679 info->n_stats = BNX2X_NUM_STATS;
8680 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8681 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8682 info->regdump_len = 0;
8683}
8684
0a64ea57
EG
8685#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8686#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8687
8688static int bnx2x_get_regs_len(struct net_device *dev)
8689{
8690 static u32 regdump_len;
8691 struct bnx2x *bp = netdev_priv(dev);
8692 int i;
8693
8694 if (regdump_len)
8695 return regdump_len;
8696
8697 if (CHIP_IS_E1(bp)) {
8698 for (i = 0; i < REGS_COUNT; i++)
8699 if (IS_E1_ONLINE(reg_addrs[i].info))
8700 regdump_len += reg_addrs[i].size;
8701
8702 for (i = 0; i < WREGS_COUNT_E1; i++)
8703 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8704 regdump_len += wreg_addrs_e1[i].size *
8705 (1 + wreg_addrs_e1[i].read_regs_count);
8706
8707 } else { /* E1H */
8708 for (i = 0; i < REGS_COUNT; i++)
8709 if (IS_E1H_ONLINE(reg_addrs[i].info))
8710 regdump_len += reg_addrs[i].size;
8711
8712 for (i = 0; i < WREGS_COUNT_E1H; i++)
8713 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8714 regdump_len += wreg_addrs_e1h[i].size *
8715 (1 + wreg_addrs_e1h[i].read_regs_count);
8716 }
8717 regdump_len *= 4;
8718 regdump_len += sizeof(struct dump_hdr);
8719
8720 return regdump_len;
8721}
8722
8723static void bnx2x_get_regs(struct net_device *dev,
8724 struct ethtool_regs *regs, void *_p)
8725{
8726 u32 *p = _p, i, j;
8727 struct bnx2x *bp = netdev_priv(dev);
8728 struct dump_hdr dump_hdr = {0};
8729
8730 regs->version = 0;
8731 memset(p, 0, regs->len);
8732
8733 if (!netif_running(bp->dev))
8734 return;
8735
8736 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8737 dump_hdr.dump_sign = dump_sign_all;
8738 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8739 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8740 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8741 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8742 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8743
8744 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8745 p += dump_hdr.hdr_size + 1;
8746
8747 if (CHIP_IS_E1(bp)) {
8748 for (i = 0; i < REGS_COUNT; i++)
8749 if (IS_E1_ONLINE(reg_addrs[i].info))
8750 for (j = 0; j < reg_addrs[i].size; j++)
8751 *p++ = REG_RD(bp,
8752 reg_addrs[i].addr + j*4);
8753
8754 } else { /* E1H */
8755 for (i = 0; i < REGS_COUNT; i++)
8756 if (IS_E1H_ONLINE(reg_addrs[i].info))
8757 for (j = 0; j < reg_addrs[i].size; j++)
8758 *p++ = REG_RD(bp,
8759 reg_addrs[i].addr + j*4);
8760 }
8761}
8762
a2fbb9ea
ET
8763static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8764{
8765 struct bnx2x *bp = netdev_priv(dev);
8766
8767 if (bp->flags & NO_WOL_FLAG) {
8768 wol->supported = 0;
8769 wol->wolopts = 0;
8770 } else {
8771 wol->supported = WAKE_MAGIC;
8772 if (bp->wol)
8773 wol->wolopts = WAKE_MAGIC;
8774 else
8775 wol->wolopts = 0;
8776 }
8777 memset(&wol->sopass, 0, sizeof(wol->sopass));
8778}
8779
8780static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8781{
8782 struct bnx2x *bp = netdev_priv(dev);
8783
8784 if (wol->wolopts & ~WAKE_MAGIC)
8785 return -EINVAL;
8786
8787 if (wol->wolopts & WAKE_MAGIC) {
8788 if (bp->flags & NO_WOL_FLAG)
8789 return -EINVAL;
8790
8791 bp->wol = 1;
34f80b04 8792 } else
a2fbb9ea 8793 bp->wol = 0;
34f80b04 8794
a2fbb9ea
ET
8795 return 0;
8796}
8797
8798static u32 bnx2x_get_msglevel(struct net_device *dev)
8799{
8800 struct bnx2x *bp = netdev_priv(dev);
8801
8802 return bp->msglevel;
8803}
8804
8805static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8806{
8807 struct bnx2x *bp = netdev_priv(dev);
8808
8809 if (capable(CAP_NET_ADMIN))
8810 bp->msglevel = level;
8811}
8812
8813static int bnx2x_nway_reset(struct net_device *dev)
8814{
8815 struct bnx2x *bp = netdev_priv(dev);
8816
34f80b04
EG
8817 if (!bp->port.pmf)
8818 return 0;
a2fbb9ea 8819
34f80b04 8820 if (netif_running(dev)) {
bb2a0f7a 8821 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8822 bnx2x_link_set(bp);
8823 }
a2fbb9ea
ET
8824
8825 return 0;
8826}
8827
01e53298
NO
8828static u32
8829bnx2x_get_link(struct net_device *dev)
8830{
8831 struct bnx2x *bp = netdev_priv(dev);
8832
8833 return bp->link_vars.link_up;
8834}
8835
a2fbb9ea
ET
8836static int bnx2x_get_eeprom_len(struct net_device *dev)
8837{
8838 struct bnx2x *bp = netdev_priv(dev);
8839
34f80b04 8840 return bp->common.flash_size;
a2fbb9ea
ET
8841}
8842
8843static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8844{
34f80b04 8845 int port = BP_PORT(bp);
a2fbb9ea
ET
8846 int count, i;
8847 u32 val = 0;
8848
8849 /* adjust timeout for emulation/FPGA */
8850 count = NVRAM_TIMEOUT_COUNT;
8851 if (CHIP_REV_IS_SLOW(bp))
8852 count *= 100;
8853
8854 /* request access to nvram interface */
8855 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8856 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8857
8858 for (i = 0; i < count*10; i++) {
8859 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8860 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8861 break;
8862
8863 udelay(5);
8864 }
8865
8866 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8867 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
8868 return -EBUSY;
8869 }
8870
8871 return 0;
8872}
8873
8874static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8875{
34f80b04 8876 int port = BP_PORT(bp);
a2fbb9ea
ET
8877 int count, i;
8878 u32 val = 0;
8879
8880 /* adjust timeout for emulation/FPGA */
8881 count = NVRAM_TIMEOUT_COUNT;
8882 if (CHIP_REV_IS_SLOW(bp))
8883 count *= 100;
8884
8885 /* relinquish nvram interface */
8886 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8887 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8888
8889 for (i = 0; i < count*10; i++) {
8890 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8891 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8892 break;
8893
8894 udelay(5);
8895 }
8896
8897 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8898 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
8899 return -EBUSY;
8900 }
8901
8902 return 0;
8903}
8904
8905static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8906{
8907 u32 val;
8908
8909 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8910
8911 /* enable both bits, even on read */
8912 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8913 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8914 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8915}
8916
8917static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8918{
8919 u32 val;
8920
8921 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8922
8923 /* disable both bits, even after read */
8924 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8925 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8926 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8927}
8928
4781bfad 8929static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
8930 u32 cmd_flags)
8931{
f1410647 8932 int count, i, rc;
a2fbb9ea
ET
8933 u32 val;
8934
8935 /* build the command word */
8936 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8937
8938 /* need to clear DONE bit separately */
8939 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8940
8941 /* address of the NVRAM to read from */
8942 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8943 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8944
8945 /* issue a read command */
8946 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8947
8948 /* adjust timeout for emulation/FPGA */
8949 count = NVRAM_TIMEOUT_COUNT;
8950 if (CHIP_REV_IS_SLOW(bp))
8951 count *= 100;
8952
8953 /* wait for completion */
8954 *ret_val = 0;
8955 rc = -EBUSY;
8956 for (i = 0; i < count; i++) {
8957 udelay(5);
8958 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8959
8960 if (val & MCPR_NVM_COMMAND_DONE) {
8961 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
8962 /* we read nvram data in cpu order
8963 * but ethtool sees it as an array of bytes
8964 * converting to big-endian will do the work */
4781bfad 8965 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
8966 rc = 0;
8967 break;
8968 }
8969 }
8970
8971 return rc;
8972}
8973
8974static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8975 int buf_size)
8976{
8977 int rc;
8978 u32 cmd_flags;
4781bfad 8979 __be32 val;
a2fbb9ea
ET
8980
8981 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8982 DP(BNX2X_MSG_NVM,
c14423fe 8983 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8984 offset, buf_size);
8985 return -EINVAL;
8986 }
8987
34f80b04
EG
8988 if (offset + buf_size > bp->common.flash_size) {
8989 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8990 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8991 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8992 return -EINVAL;
8993 }
8994
8995 /* request access to nvram interface */
8996 rc = bnx2x_acquire_nvram_lock(bp);
8997 if (rc)
8998 return rc;
8999
9000 /* enable access to nvram interface */
9001 bnx2x_enable_nvram_access(bp);
9002
9003 /* read the first word(s) */
9004 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9005 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9006 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9007 memcpy(ret_buf, &val, 4);
9008
9009 /* advance to the next dword */
9010 offset += sizeof(u32);
9011 ret_buf += sizeof(u32);
9012 buf_size -= sizeof(u32);
9013 cmd_flags = 0;
9014 }
9015
9016 if (rc == 0) {
9017 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9018 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9019 memcpy(ret_buf, &val, 4);
9020 }
9021
9022 /* disable access to nvram interface */
9023 bnx2x_disable_nvram_access(bp);
9024 bnx2x_release_nvram_lock(bp);
9025
9026 return rc;
9027}
9028
9029static int bnx2x_get_eeprom(struct net_device *dev,
9030 struct ethtool_eeprom *eeprom, u8 *eebuf)
9031{
9032 struct bnx2x *bp = netdev_priv(dev);
9033 int rc;
9034
2add3acb
EG
9035 if (!netif_running(dev))
9036 return -EAGAIN;
9037
34f80b04 9038 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9039 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9040 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9041 eeprom->len, eeprom->len);
9042
9043 /* parameters already validated in ethtool_get_eeprom */
9044
9045 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9046
9047 return rc;
9048}
9049
9050static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9051 u32 cmd_flags)
9052{
f1410647 9053 int count, i, rc;
a2fbb9ea
ET
9054
9055 /* build the command word */
9056 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9057
9058 /* need to clear DONE bit separately */
9059 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9060
9061 /* write the data */
9062 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9063
9064 /* address of the NVRAM to write to */
9065 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9066 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9067
9068 /* issue the write command */
9069 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9070
9071 /* adjust timeout for emulation/FPGA */
9072 count = NVRAM_TIMEOUT_COUNT;
9073 if (CHIP_REV_IS_SLOW(bp))
9074 count *= 100;
9075
9076 /* wait for completion */
9077 rc = -EBUSY;
9078 for (i = 0; i < count; i++) {
9079 udelay(5);
9080 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9081 if (val & MCPR_NVM_COMMAND_DONE) {
9082 rc = 0;
9083 break;
9084 }
9085 }
9086
9087 return rc;
9088}
9089
f1410647 9090#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
9091
9092static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9093 int buf_size)
9094{
9095 int rc;
9096 u32 cmd_flags;
9097 u32 align_offset;
4781bfad 9098 __be32 val;
a2fbb9ea 9099
34f80b04
EG
9100 if (offset + buf_size > bp->common.flash_size) {
9101 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9102 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9103 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9104 return -EINVAL;
9105 }
9106
9107 /* request access to nvram interface */
9108 rc = bnx2x_acquire_nvram_lock(bp);
9109 if (rc)
9110 return rc;
9111
9112 /* enable access to nvram interface */
9113 bnx2x_enable_nvram_access(bp);
9114
9115 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9116 align_offset = (offset & ~0x03);
9117 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9118
9119 if (rc == 0) {
9120 val &= ~(0xff << BYTE_OFFSET(offset));
9121 val |= (*data_buf << BYTE_OFFSET(offset));
9122
9123 /* nvram data is returned as an array of bytes
9124 * convert it back to cpu order */
9125 val = be32_to_cpu(val);
9126
a2fbb9ea
ET
9127 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9128 cmd_flags);
9129 }
9130
9131 /* disable access to nvram interface */
9132 bnx2x_disable_nvram_access(bp);
9133 bnx2x_release_nvram_lock(bp);
9134
9135 return rc;
9136}
9137
9138static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9139 int buf_size)
9140{
9141 int rc;
9142 u32 cmd_flags;
9143 u32 val;
9144 u32 written_so_far;
9145
34f80b04 9146 if (buf_size == 1) /* ethtool */
a2fbb9ea 9147 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
9148
9149 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9150 DP(BNX2X_MSG_NVM,
c14423fe 9151 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9152 offset, buf_size);
9153 return -EINVAL;
9154 }
9155
34f80b04
EG
9156 if (offset + buf_size > bp->common.flash_size) {
9157 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9158 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9159 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9160 return -EINVAL;
9161 }
9162
9163 /* request access to nvram interface */
9164 rc = bnx2x_acquire_nvram_lock(bp);
9165 if (rc)
9166 return rc;
9167
9168 /* enable access to nvram interface */
9169 bnx2x_enable_nvram_access(bp);
9170
9171 written_so_far = 0;
9172 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9173 while ((written_so_far < buf_size) && (rc == 0)) {
9174 if (written_so_far == (buf_size - sizeof(u32)))
9175 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9176 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9177 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9178 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9179 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9180
9181 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
9182
9183 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9184
9185 /* advance to the next dword */
9186 offset += sizeof(u32);
9187 data_buf += sizeof(u32);
9188 written_so_far += sizeof(u32);
9189 cmd_flags = 0;
9190 }
9191
9192 /* disable access to nvram interface */
9193 bnx2x_disable_nvram_access(bp);
9194 bnx2x_release_nvram_lock(bp);
9195
9196 return rc;
9197}
9198
9199static int bnx2x_set_eeprom(struct net_device *dev,
9200 struct ethtool_eeprom *eeprom, u8 *eebuf)
9201{
9202 struct bnx2x *bp = netdev_priv(dev);
9203 int rc;
9204
9f4c9583
EG
9205 if (!netif_running(dev))
9206 return -EAGAIN;
9207
34f80b04 9208 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9209 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9210 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9211 eeprom->len, eeprom->len);
9212
9213 /* parameters already validated in ethtool_set_eeprom */
9214
c18487ee 9215 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
9216 if (eeprom->magic == 0x00504859)
9217 if (bp->port.pmf) {
9218
4a37fb66 9219 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
9220 rc = bnx2x_flash_download(bp, BP_PORT(bp),
9221 bp->link_params.ext_phy_config,
9222 (bp->state != BNX2X_STATE_CLOSED),
9223 eebuf, eeprom->len);
bb2a0f7a
YG
9224 if ((bp->state == BNX2X_STATE_OPEN) ||
9225 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04 9226 rc |= bnx2x_link_reset(&bp->link_params,
589abe3a 9227 &bp->link_vars, 1);
34f80b04
EG
9228 rc |= bnx2x_phy_init(&bp->link_params,
9229 &bp->link_vars);
bb2a0f7a 9230 }
4a37fb66 9231 bnx2x_release_phy_lock(bp);
34f80b04
EG
9232
9233 } else /* Only the PMF can access the PHY */
9234 return -EINVAL;
9235 else
c18487ee 9236 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
9237
9238 return rc;
9239}
9240
9241static int bnx2x_get_coalesce(struct net_device *dev,
9242 struct ethtool_coalesce *coal)
9243{
9244 struct bnx2x *bp = netdev_priv(dev);
9245
9246 memset(coal, 0, sizeof(struct ethtool_coalesce));
9247
9248 coal->rx_coalesce_usecs = bp->rx_ticks;
9249 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
9250
9251 return 0;
9252}
9253
ca00392c 9254#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
a2fbb9ea
ET
9255static int bnx2x_set_coalesce(struct net_device *dev,
9256 struct ethtool_coalesce *coal)
9257{
9258 struct bnx2x *bp = netdev_priv(dev);
9259
9260 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
ca00392c
EG
9261 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9262 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea
ET
9263
9264 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
ca00392c
EG
9265 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9266 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea 9267
34f80b04 9268 if (netif_running(dev))
a2fbb9ea
ET
9269 bnx2x_update_coalesce(bp);
9270
9271 return 0;
9272}
9273
9274static void bnx2x_get_ringparam(struct net_device *dev,
9275 struct ethtool_ringparam *ering)
9276{
9277 struct bnx2x *bp = netdev_priv(dev);
9278
9279 ering->rx_max_pending = MAX_RX_AVAIL;
9280 ering->rx_mini_max_pending = 0;
9281 ering->rx_jumbo_max_pending = 0;
9282
9283 ering->rx_pending = bp->rx_ring_size;
9284 ering->rx_mini_pending = 0;
9285 ering->rx_jumbo_pending = 0;
9286
9287 ering->tx_max_pending = MAX_TX_AVAIL;
9288 ering->tx_pending = bp->tx_ring_size;
9289}
9290
9291static int bnx2x_set_ringparam(struct net_device *dev,
9292 struct ethtool_ringparam *ering)
9293{
9294 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9295 int rc = 0;
a2fbb9ea
ET
9296
9297 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9298 (ering->tx_pending > MAX_TX_AVAIL) ||
9299 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9300 return -EINVAL;
9301
9302 bp->rx_ring_size = ering->rx_pending;
9303 bp->tx_ring_size = ering->tx_pending;
9304
34f80b04
EG
9305 if (netif_running(dev)) {
9306 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9307 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
9308 }
9309
34f80b04 9310 return rc;
a2fbb9ea
ET
9311}
9312
9313static void bnx2x_get_pauseparam(struct net_device *dev,
9314 struct ethtool_pauseparam *epause)
9315{
9316 struct bnx2x *bp = netdev_priv(dev);
9317
356e2385
EG
9318 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9319 BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
9320 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9321
c0700f90
DM
9322 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9323 BNX2X_FLOW_CTRL_RX);
9324 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9325 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
9326
9327 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9328 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9329 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9330}
9331
9332static int bnx2x_set_pauseparam(struct net_device *dev,
9333 struct ethtool_pauseparam *epause)
9334{
9335 struct bnx2x *bp = netdev_priv(dev);
9336
34f80b04
EG
9337 if (IS_E1HMF(bp))
9338 return 0;
9339
a2fbb9ea
ET
9340 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9341 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9342 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9343
c0700f90 9344 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9345
f1410647 9346 if (epause->rx_pause)
c0700f90 9347 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9348
f1410647 9349 if (epause->tx_pause)
c0700f90 9350 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9351
c0700f90
DM
9352 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9353 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9354
c18487ee 9355 if (epause->autoneg) {
34f80b04 9356 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9357 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
9358 return -EINVAL;
9359 }
a2fbb9ea 9360
c18487ee 9361 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9362 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9363 }
a2fbb9ea 9364
c18487ee
YR
9365 DP(NETIF_MSG_LINK,
9366 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
9367
9368 if (netif_running(dev)) {
bb2a0f7a 9369 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9370 bnx2x_link_set(bp);
9371 }
a2fbb9ea
ET
9372
9373 return 0;
9374}
9375
df0f2343
VZ
9376static int bnx2x_set_flags(struct net_device *dev, u32 data)
9377{
9378 struct bnx2x *bp = netdev_priv(dev);
9379 int changed = 0;
9380 int rc = 0;
9381
9382 /* TPA requires Rx CSUM offloading */
9383 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9384 if (!(dev->features & NETIF_F_LRO)) {
9385 dev->features |= NETIF_F_LRO;
9386 bp->flags |= TPA_ENABLE_FLAG;
9387 changed = 1;
9388 }
9389
9390 } else if (dev->features & NETIF_F_LRO) {
9391 dev->features &= ~NETIF_F_LRO;
9392 bp->flags &= ~TPA_ENABLE_FLAG;
9393 changed = 1;
9394 }
9395
9396 if (changed && netif_running(dev)) {
9397 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9398 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9399 }
9400
9401 return rc;
9402}
9403
a2fbb9ea
ET
9404static u32 bnx2x_get_rx_csum(struct net_device *dev)
9405{
9406 struct bnx2x *bp = netdev_priv(dev);
9407
9408 return bp->rx_csum;
9409}
9410
9411static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9412{
9413 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9414 int rc = 0;
a2fbb9ea
ET
9415
9416 bp->rx_csum = data;
df0f2343
VZ
9417
9418 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9419 TPA'ed packets will be discarded due to wrong TCP CSUM */
9420 if (!data) {
9421 u32 flags = ethtool_op_get_flags(dev);
9422
9423 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9424 }
9425
9426 return rc;
a2fbb9ea
ET
9427}
9428
9429static int bnx2x_set_tso(struct net_device *dev, u32 data)
9430{
755735eb 9431 if (data) {
a2fbb9ea 9432 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9433 dev->features |= NETIF_F_TSO6;
9434 } else {
a2fbb9ea 9435 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9436 dev->features &= ~NETIF_F_TSO6;
9437 }
9438
a2fbb9ea
ET
9439 return 0;
9440}
9441
f3c87cdd 9442static const struct {
a2fbb9ea
ET
9443 char string[ETH_GSTRING_LEN];
9444} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
9445 { "register_test (offline)" },
9446 { "memory_test (offline)" },
9447 { "loopback_test (offline)" },
9448 { "nvram_test (online)" },
9449 { "interrupt_test (online)" },
9450 { "link_test (online)" },
d3d4f495 9451 { "idle check (online)" }
a2fbb9ea
ET
9452};
9453
9454static int bnx2x_self_test_count(struct net_device *dev)
9455{
9456 return BNX2X_NUM_TESTS;
9457}
9458
f3c87cdd
YG
9459static int bnx2x_test_registers(struct bnx2x *bp)
9460{
9461 int idx, i, rc = -ENODEV;
9462 u32 wr_val = 0;
9dabc424 9463 int port = BP_PORT(bp);
f3c87cdd
YG
9464 static const struct {
9465 u32 offset0;
9466 u32 offset1;
9467 u32 mask;
9468 } reg_tbl[] = {
9469/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9470 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9471 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9472 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9473 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9474 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9475 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9476 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9477 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9478 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9479/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9480 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9481 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9482 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9483 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9484 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9485 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9486 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
f3c87cdd 9487 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
c1f1a06f
EG
9488 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9489/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
f3c87cdd
YG
9490 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9491 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9492 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9493 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9494 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9495 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9496 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9497 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
c1f1a06f
EG
9498 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9499/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
f3c87cdd
YG
9500 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9501 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9502 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9503 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9504 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9505 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9506
9507 { 0xffffffff, 0, 0x00000000 }
9508 };
9509
9510 if (!netif_running(bp->dev))
9511 return rc;
9512
9513 /* Repeat the test twice:
9514 First by writing 0x00000000, second by writing 0xffffffff */
9515 for (idx = 0; idx < 2; idx++) {
9516
9517 switch (idx) {
9518 case 0:
9519 wr_val = 0;
9520 break;
9521 case 1:
9522 wr_val = 0xffffffff;
9523 break;
9524 }
9525
9526 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9527 u32 offset, mask, save_val, val;
f3c87cdd
YG
9528
9529 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9530 mask = reg_tbl[i].mask;
9531
9532 save_val = REG_RD(bp, offset);
9533
9534 REG_WR(bp, offset, wr_val);
9535 val = REG_RD(bp, offset);
9536
9537 /* Restore the original register's value */
9538 REG_WR(bp, offset, save_val);
9539
9540 /* verify that value is as expected value */
9541 if ((val & mask) != (wr_val & mask))
9542 goto test_reg_exit;
9543 }
9544 }
9545
9546 rc = 0;
9547
9548test_reg_exit:
9549 return rc;
9550}
9551
9552static int bnx2x_test_memory(struct bnx2x *bp)
9553{
9554 int i, j, rc = -ENODEV;
9555 u32 val;
9556 static const struct {
9557 u32 offset;
9558 int size;
9559 } mem_tbl[] = {
9560 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9561 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9562 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9563 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9564 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9565 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9566 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9567
9568 { 0xffffffff, 0 }
9569 };
9570 static const struct {
9571 char *name;
9572 u32 offset;
9dabc424
YG
9573 u32 e1_mask;
9574 u32 e1h_mask;
f3c87cdd 9575 } prty_tbl[] = {
9dabc424
YG
9576 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9577 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9578 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9579 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9580 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9581 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9582
9583 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
9584 };
9585
9586 if (!netif_running(bp->dev))
9587 return rc;
9588
9589 /* Go through all the memories */
9590 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9591 for (j = 0; j < mem_tbl[i].size; j++)
9592 REG_RD(bp, mem_tbl[i].offset + j*4);
9593
9594 /* Check the parity status */
9595 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9596 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
9597 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9598 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
9599 DP(NETIF_MSG_HW,
9600 "%s is 0x%x\n", prty_tbl[i].name, val);
9601 goto test_mem_exit;
9602 }
9603 }
9604
9605 rc = 0;
9606
9607test_mem_exit:
9608 return rc;
9609}
9610
f3c87cdd
YG
9611static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9612{
9613 int cnt = 1000;
9614
9615 if (link_up)
9616 while (bnx2x_link_test(bp) && cnt--)
9617 msleep(10);
9618}
9619
9620static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9621{
9622 unsigned int pkt_size, num_pkts, i;
9623 struct sk_buff *skb;
9624 unsigned char *packet;
ca00392c
EG
9625 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9626 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
f3c87cdd
YG
9627 u16 tx_start_idx, tx_idx;
9628 u16 rx_start_idx, rx_idx;
ca00392c 9629 u16 pkt_prod, bd_prod;
f3c87cdd 9630 struct sw_tx_bd *tx_buf;
ca00392c
EG
9631 struct eth_tx_start_bd *tx_start_bd;
9632 struct eth_tx_parse_bd *pbd = NULL;
f3c87cdd
YG
9633 dma_addr_t mapping;
9634 union eth_rx_cqe *cqe;
9635 u8 cqe_fp_flags;
9636 struct sw_rx_bd *rx_buf;
9637 u16 len;
9638 int rc = -ENODEV;
9639
b5bf9068
EG
9640 /* check the loopback mode */
9641 switch (loopback_mode) {
9642 case BNX2X_PHY_LOOPBACK:
9643 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9644 return -EINVAL;
9645 break;
9646 case BNX2X_MAC_LOOPBACK:
f3c87cdd 9647 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9648 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
9649 break;
9650 default:
f3c87cdd 9651 return -EINVAL;
b5bf9068 9652 }
f3c87cdd 9653
b5bf9068
EG
9654 /* prepare the loopback packet */
9655 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9656 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
9657 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9658 if (!skb) {
9659 rc = -ENOMEM;
9660 goto test_loopback_exit;
9661 }
9662 packet = skb_put(skb, pkt_size);
9663 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
ca00392c
EG
9664 memset(packet + ETH_ALEN, 0, ETH_ALEN);
9665 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
f3c87cdd
YG
9666 for (i = ETH_HLEN; i < pkt_size; i++)
9667 packet[i] = (unsigned char) (i & 0xff);
9668
b5bf9068 9669 /* send the loopback packet */
f3c87cdd 9670 num_pkts = 0;
ca00392c
EG
9671 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9672 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd 9673
ca00392c
EG
9674 pkt_prod = fp_tx->tx_pkt_prod++;
9675 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
9676 tx_buf->first_bd = fp_tx->tx_bd_prod;
f3c87cdd 9677 tx_buf->skb = skb;
ca00392c 9678 tx_buf->flags = 0;
f3c87cdd 9679
ca00392c
EG
9680 bd_prod = TX_BD(fp_tx->tx_bd_prod);
9681 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
f3c87cdd
YG
9682 mapping = pci_map_single(bp->pdev, skb->data,
9683 skb_headlen(skb), PCI_DMA_TODEVICE);
ca00392c
EG
9684 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9685 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9686 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
9687 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9688 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
9689 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9690 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
9691 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9692
9693 /* turn on parsing and get a BD */
9694 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9695 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
9696
9697 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
f3c87cdd 9698
58f4c4cf
EG
9699 wmb();
9700
ca00392c
EG
9701 fp_tx->tx_db.data.prod += 2;
9702 barrier();
9703 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
f3c87cdd
YG
9704
9705 mmiowb();
9706
9707 num_pkts++;
ca00392c 9708 fp_tx->tx_bd_prod += 2; /* start + pbd */
f3c87cdd
YG
9709 bp->dev->trans_start = jiffies;
9710
9711 udelay(100);
9712
ca00392c 9713 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
f3c87cdd
YG
9714 if (tx_idx != tx_start_idx + num_pkts)
9715 goto test_loopback_exit;
9716
ca00392c 9717 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd
YG
9718 if (rx_idx != rx_start_idx + num_pkts)
9719 goto test_loopback_exit;
9720
ca00392c 9721 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
f3c87cdd
YG
9722 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9723 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9724 goto test_loopback_rx_exit;
9725
9726 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9727 if (len != pkt_size)
9728 goto test_loopback_rx_exit;
9729
ca00392c 9730 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
f3c87cdd
YG
9731 skb = rx_buf->skb;
9732 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9733 for (i = ETH_HLEN; i < pkt_size; i++)
9734 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9735 goto test_loopback_rx_exit;
9736
9737 rc = 0;
9738
9739test_loopback_rx_exit:
f3c87cdd 9740
ca00392c
EG
9741 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
9742 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
9743 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
9744 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
f3c87cdd
YG
9745
9746 /* Update producers */
ca00392c
EG
9747 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
9748 fp_rx->rx_sge_prod);
f3c87cdd
YG
9749
9750test_loopback_exit:
9751 bp->link_params.loopback_mode = LOOPBACK_NONE;
9752
9753 return rc;
9754}
9755
9756static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9757{
b5bf9068 9758 int rc = 0, res;
f3c87cdd
YG
9759
9760 if (!netif_running(bp->dev))
9761 return BNX2X_LOOPBACK_FAILED;
9762
f8ef6e44 9763 bnx2x_netif_stop(bp, 1);
3910c8ae 9764 bnx2x_acquire_phy_lock(bp);
f3c87cdd 9765
b5bf9068
EG
9766 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9767 if (res) {
9768 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9769 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
9770 }
9771
b5bf9068
EG
9772 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9773 if (res) {
9774 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9775 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
9776 }
9777
3910c8ae 9778 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
9779 bnx2x_netif_start(bp);
9780
9781 return rc;
9782}
9783
9784#define CRC32_RESIDUAL 0xdebb20e3
9785
9786static int bnx2x_test_nvram(struct bnx2x *bp)
9787{
9788 static const struct {
9789 int offset;
9790 int size;
9791 } nvram_tbl[] = {
9792 { 0, 0x14 }, /* bootstrap */
9793 { 0x14, 0xec }, /* dir */
9794 { 0x100, 0x350 }, /* manuf_info */
9795 { 0x450, 0xf0 }, /* feature_info */
9796 { 0x640, 0x64 }, /* upgrade_key_info */
9797 { 0x6a4, 0x64 },
9798 { 0x708, 0x70 }, /* manuf_key_info */
9799 { 0x778, 0x70 },
9800 { 0, 0 }
9801 };
4781bfad 9802 __be32 buf[0x350 / 4];
f3c87cdd
YG
9803 u8 *data = (u8 *)buf;
9804 int i, rc;
9805 u32 magic, csum;
9806
9807 rc = bnx2x_nvram_read(bp, 0, data, 4);
9808 if (rc) {
f5372251 9809 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
9810 goto test_nvram_exit;
9811 }
9812
9813 magic = be32_to_cpu(buf[0]);
9814 if (magic != 0x669955aa) {
9815 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9816 rc = -ENODEV;
9817 goto test_nvram_exit;
9818 }
9819
9820 for (i = 0; nvram_tbl[i].size; i++) {
9821
9822 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9823 nvram_tbl[i].size);
9824 if (rc) {
9825 DP(NETIF_MSG_PROBE,
f5372251 9826 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
9827 goto test_nvram_exit;
9828 }
9829
9830 csum = ether_crc_le(nvram_tbl[i].size, data);
9831 if (csum != CRC32_RESIDUAL) {
9832 DP(NETIF_MSG_PROBE,
9833 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9834 rc = -ENODEV;
9835 goto test_nvram_exit;
9836 }
9837 }
9838
9839test_nvram_exit:
9840 return rc;
9841}
9842
9843static int bnx2x_test_intr(struct bnx2x *bp)
9844{
9845 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9846 int i, rc;
9847
9848 if (!netif_running(bp->dev))
9849 return -ENODEV;
9850
8d9c5f34 9851 config->hdr.length = 0;
af246401
EG
9852 if (CHIP_IS_E1(bp))
9853 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9854 else
9855 config->hdr.offset = BP_FUNC(bp);
0626b899 9856 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
9857 config->hdr.reserved1 = 0;
9858
9859 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9860 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9861 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9862 if (rc == 0) {
9863 bp->set_mac_pending++;
9864 for (i = 0; i < 10; i++) {
9865 if (!bp->set_mac_pending)
9866 break;
9867 msleep_interruptible(10);
9868 }
9869 if (i == 10)
9870 rc = -ENODEV;
9871 }
9872
9873 return rc;
9874}
9875
a2fbb9ea
ET
9876static void bnx2x_self_test(struct net_device *dev,
9877 struct ethtool_test *etest, u64 *buf)
9878{
9879 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
9880
9881 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9882
f3c87cdd 9883 if (!netif_running(dev))
a2fbb9ea 9884 return;
a2fbb9ea 9885
33471629 9886 /* offline tests are not supported in MF mode */
f3c87cdd
YG
9887 if (IS_E1HMF(bp))
9888 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9889
9890 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
9891 int port = BP_PORT(bp);
9892 u32 val;
f3c87cdd
YG
9893 u8 link_up;
9894
279abdf5
EG
9895 /* save current value of input enable for TX port IF */
9896 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
9897 /* disable input for TX port IF */
9898 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
9899
f3c87cdd
YG
9900 link_up = bp->link_vars.link_up;
9901 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9902 bnx2x_nic_load(bp, LOAD_DIAG);
9903 /* wait until link state is restored */
9904 bnx2x_wait_for_link(bp, link_up);
9905
9906 if (bnx2x_test_registers(bp) != 0) {
9907 buf[0] = 1;
9908 etest->flags |= ETH_TEST_FL_FAILED;
9909 }
9910 if (bnx2x_test_memory(bp) != 0) {
9911 buf[1] = 1;
9912 etest->flags |= ETH_TEST_FL_FAILED;
9913 }
9914 buf[2] = bnx2x_test_loopback(bp, link_up);
9915 if (buf[2] != 0)
9916 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 9917
f3c87cdd 9918 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
9919
9920 /* restore input for TX port IF */
9921 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
9922
f3c87cdd
YG
9923 bnx2x_nic_load(bp, LOAD_NORMAL);
9924 /* wait until link state is restored */
9925 bnx2x_wait_for_link(bp, link_up);
9926 }
9927 if (bnx2x_test_nvram(bp) != 0) {
9928 buf[3] = 1;
a2fbb9ea
ET
9929 etest->flags |= ETH_TEST_FL_FAILED;
9930 }
f3c87cdd
YG
9931 if (bnx2x_test_intr(bp) != 0) {
9932 buf[4] = 1;
9933 etest->flags |= ETH_TEST_FL_FAILED;
9934 }
9935 if (bp->port.pmf)
9936 if (bnx2x_link_test(bp) != 0) {
9937 buf[5] = 1;
9938 etest->flags |= ETH_TEST_FL_FAILED;
9939 }
f3c87cdd
YG
9940
9941#ifdef BNX2X_EXTRA_DEBUG
9942 bnx2x_panic_dump(bp);
9943#endif
a2fbb9ea
ET
9944}
9945
de832a55
EG
9946static const struct {
9947 long offset;
9948 int size;
9949 u8 string[ETH_GSTRING_LEN];
9950} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9951/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9952 { Q_STATS_OFFSET32(error_bytes_received_hi),
9953 8, "[%d]: rx_error_bytes" },
9954 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9955 8, "[%d]: rx_ucast_packets" },
9956 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9957 8, "[%d]: rx_mcast_packets" },
9958 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9959 8, "[%d]: rx_bcast_packets" },
9960 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9961 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9962 4, "[%d]: rx_phy_ip_err_discards"},
9963 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9964 4, "[%d]: rx_skb_alloc_discard" },
9965 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9966
9967/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9968 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9969 8, "[%d]: tx_packets" }
9970};
9971
bb2a0f7a
YG
9972static const struct {
9973 long offset;
9974 int size;
9975 u32 flags;
66e855f3
YG
9976#define STATS_FLAGS_PORT 1
9977#define STATS_FLAGS_FUNC 2
de832a55 9978#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 9979 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 9980} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
9981/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9982 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 9983 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 9984 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 9985 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 9986 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 9987 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 9988 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 9989 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 9990 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 9991 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9992 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9993 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9994 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
9995 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9996 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9997 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9998 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9999/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10000 8, STATS_FLAGS_PORT, "rx_fragments" },
10001 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10002 8, STATS_FLAGS_PORT, "rx_jabbers" },
10003 { STATS_OFFSET32(no_buff_discard_hi),
10004 8, STATS_FLAGS_BOTH, "rx_discards" },
10005 { STATS_OFFSET32(mac_filter_discard),
10006 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10007 { STATS_OFFSET32(xxoverflow_discard),
10008 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10009 { STATS_OFFSET32(brb_drop_hi),
10010 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10011 { STATS_OFFSET32(brb_truncate_hi),
10012 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10013 { STATS_OFFSET32(pause_frames_received_hi),
10014 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10015 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10016 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10017 { STATS_OFFSET32(nig_timer_max),
10018 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10019/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10020 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10021 { STATS_OFFSET32(rx_skb_alloc_failed),
10022 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10023 { STATS_OFFSET32(hw_csum_err),
10024 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10025
10026 { STATS_OFFSET32(total_bytes_transmitted_hi),
10027 8, STATS_FLAGS_BOTH, "tx_bytes" },
10028 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10029 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10030 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10031 8, STATS_FLAGS_BOTH, "tx_packets" },
10032 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10033 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10034 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10035 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 10036 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 10037 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 10038 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 10039 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 10040/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 10041 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 10042 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 10043 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 10044 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 10045 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 10046 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 10047 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 10048 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 10049 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 10050 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 10051 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 10052 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 10053 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 10054 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 10055 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 10056 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 10057 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 10058 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 10059 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 10060/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 10061 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
10062 { STATS_OFFSET32(pause_frames_sent_hi),
10063 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
10064};
10065
de832a55
EG
10066#define IS_PORT_STAT(i) \
10067 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10068#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10069#define IS_E1HMF_MODE_STAT(bp) \
10070 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 10071
a2fbb9ea
ET
10072static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10073{
bb2a0f7a 10074 struct bnx2x *bp = netdev_priv(dev);
de832a55 10075 int i, j, k;
bb2a0f7a 10076
a2fbb9ea
ET
10077 switch (stringset) {
10078 case ETH_SS_STATS:
de832a55
EG
10079 if (is_multi(bp)) {
10080 k = 0;
ca00392c 10081 for_each_rx_queue(bp, i) {
de832a55
EG
10082 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10083 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10084 bnx2x_q_stats_arr[j].string, i);
10085 k += BNX2X_NUM_Q_STATS;
10086 }
10087 if (IS_E1HMF_MODE_STAT(bp))
10088 break;
10089 for (j = 0; j < BNX2X_NUM_STATS; j++)
10090 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10091 bnx2x_stats_arr[j].string);
10092 } else {
10093 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10094 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10095 continue;
10096 strcpy(buf + j*ETH_GSTRING_LEN,
10097 bnx2x_stats_arr[i].string);
10098 j++;
10099 }
bb2a0f7a 10100 }
a2fbb9ea
ET
10101 break;
10102
10103 case ETH_SS_TEST:
10104 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10105 break;
10106 }
10107}
10108
10109static int bnx2x_get_stats_count(struct net_device *dev)
10110{
bb2a0f7a 10111 struct bnx2x *bp = netdev_priv(dev);
de832a55 10112 int i, num_stats;
bb2a0f7a 10113
de832a55 10114 if (is_multi(bp)) {
ca00392c 10115 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
de832a55
EG
10116 if (!IS_E1HMF_MODE_STAT(bp))
10117 num_stats += BNX2X_NUM_STATS;
10118 } else {
10119 if (IS_E1HMF_MODE_STAT(bp)) {
10120 num_stats = 0;
10121 for (i = 0; i < BNX2X_NUM_STATS; i++)
10122 if (IS_FUNC_STAT(i))
10123 num_stats++;
10124 } else
10125 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 10126 }
de832a55 10127
bb2a0f7a 10128 return num_stats;
a2fbb9ea
ET
10129}
10130
10131static void bnx2x_get_ethtool_stats(struct net_device *dev,
10132 struct ethtool_stats *stats, u64 *buf)
10133{
10134 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
10135 u32 *hw_stats, *offset;
10136 int i, j, k;
bb2a0f7a 10137
de832a55
EG
10138 if (is_multi(bp)) {
10139 k = 0;
ca00392c 10140 for_each_rx_queue(bp, i) {
de832a55
EG
10141 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10142 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10143 if (bnx2x_q_stats_arr[j].size == 0) {
10144 /* skip this counter */
10145 buf[k + j] = 0;
10146 continue;
10147 }
10148 offset = (hw_stats +
10149 bnx2x_q_stats_arr[j].offset);
10150 if (bnx2x_q_stats_arr[j].size == 4) {
10151 /* 4-byte counter */
10152 buf[k + j] = (u64) *offset;
10153 continue;
10154 }
10155 /* 8-byte counter */
10156 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10157 }
10158 k += BNX2X_NUM_Q_STATS;
10159 }
10160 if (IS_E1HMF_MODE_STAT(bp))
10161 return;
10162 hw_stats = (u32 *)&bp->eth_stats;
10163 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10164 if (bnx2x_stats_arr[j].size == 0) {
10165 /* skip this counter */
10166 buf[k + j] = 0;
10167 continue;
10168 }
10169 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10170 if (bnx2x_stats_arr[j].size == 4) {
10171 /* 4-byte counter */
10172 buf[k + j] = (u64) *offset;
10173 continue;
10174 }
10175 /* 8-byte counter */
10176 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10177 }
de832a55
EG
10178 } else {
10179 hw_stats = (u32 *)&bp->eth_stats;
10180 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10181 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10182 continue;
10183 if (bnx2x_stats_arr[i].size == 0) {
10184 /* skip this counter */
10185 buf[j] = 0;
10186 j++;
10187 continue;
10188 }
10189 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10190 if (bnx2x_stats_arr[i].size == 4) {
10191 /* 4-byte counter */
10192 buf[j] = (u64) *offset;
10193 j++;
10194 continue;
10195 }
10196 /* 8-byte counter */
10197 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10198 j++;
a2fbb9ea 10199 }
a2fbb9ea
ET
10200 }
10201}
10202
10203static int bnx2x_phys_id(struct net_device *dev, u32 data)
10204{
10205 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10206 int port = BP_PORT(bp);
a2fbb9ea
ET
10207 int i;
10208
34f80b04
EG
10209 if (!netif_running(dev))
10210 return 0;
10211
10212 if (!bp->port.pmf)
10213 return 0;
10214
a2fbb9ea
ET
10215 if (data == 0)
10216 data = 2;
10217
10218 for (i = 0; i < (data * 2); i++) {
c18487ee 10219 if ((i % 2) == 0)
34f80b04 10220 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
10221 bp->link_params.hw_led_mode,
10222 bp->link_params.chip_id);
10223 else
34f80b04 10224 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
10225 bp->link_params.hw_led_mode,
10226 bp->link_params.chip_id);
10227
a2fbb9ea
ET
10228 msleep_interruptible(500);
10229 if (signal_pending(current))
10230 break;
10231 }
10232
c18487ee 10233 if (bp->link_vars.link_up)
34f80b04 10234 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
10235 bp->link_vars.line_speed,
10236 bp->link_params.hw_led_mode,
10237 bp->link_params.chip_id);
a2fbb9ea
ET
10238
10239 return 0;
10240}
10241
10242static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
10243 .get_settings = bnx2x_get_settings,
10244 .set_settings = bnx2x_set_settings,
10245 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
10246 .get_regs_len = bnx2x_get_regs_len,
10247 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
10248 .get_wol = bnx2x_get_wol,
10249 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
10250 .get_msglevel = bnx2x_get_msglevel,
10251 .set_msglevel = bnx2x_set_msglevel,
10252 .nway_reset = bnx2x_nway_reset,
01e53298 10253 .get_link = bnx2x_get_link,
7a9b2557
VZ
10254 .get_eeprom_len = bnx2x_get_eeprom_len,
10255 .get_eeprom = bnx2x_get_eeprom,
10256 .set_eeprom = bnx2x_set_eeprom,
10257 .get_coalesce = bnx2x_get_coalesce,
10258 .set_coalesce = bnx2x_set_coalesce,
10259 .get_ringparam = bnx2x_get_ringparam,
10260 .set_ringparam = bnx2x_set_ringparam,
10261 .get_pauseparam = bnx2x_get_pauseparam,
10262 .set_pauseparam = bnx2x_set_pauseparam,
10263 .get_rx_csum = bnx2x_get_rx_csum,
10264 .set_rx_csum = bnx2x_set_rx_csum,
10265 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10266 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
10267 .set_flags = bnx2x_set_flags,
10268 .get_flags = ethtool_op_get_flags,
10269 .get_sg = ethtool_op_get_sg,
10270 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
10271 .get_tso = ethtool_op_get_tso,
10272 .set_tso = bnx2x_set_tso,
10273 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
10274 .self_test = bnx2x_self_test,
10275 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
10276 .phys_id = bnx2x_phys_id,
10277 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 10278 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
10279};
10280
10281/* end of ethtool_ops */
10282
10283/****************************************************************************
10284* General service functions
10285****************************************************************************/
10286
10287static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10288{
10289 u16 pmcsr;
10290
10291 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10292
10293 switch (state) {
10294 case PCI_D0:
34f80b04 10295 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
10296 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10297 PCI_PM_CTRL_PME_STATUS));
10298
10299 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10300 /* delay required during transition out of D3hot */
a2fbb9ea 10301 msleep(20);
34f80b04 10302 break;
a2fbb9ea 10303
34f80b04
EG
10304 case PCI_D3hot:
10305 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10306 pmcsr |= 3;
a2fbb9ea 10307
34f80b04
EG
10308 if (bp->wol)
10309 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10310
34f80b04
EG
10311 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10312 pmcsr);
a2fbb9ea 10313
34f80b04
EG
10314 /* No more memory access after this point until
10315 * device is brought back to D0.
10316 */
10317 break;
10318
10319 default:
10320 return -EINVAL;
10321 }
10322 return 0;
a2fbb9ea
ET
10323}
10324
237907c1
EG
10325static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10326{
10327 u16 rx_cons_sb;
10328
10329 /* Tell compiler that status block fields can change */
10330 barrier();
10331 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10332 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10333 rx_cons_sb++;
10334 return (fp->rx_comp_cons != rx_cons_sb);
10335}
10336
34f80b04
EG
10337/*
10338 * net_device service functions
10339 */
10340
a2fbb9ea
ET
10341static int bnx2x_poll(struct napi_struct *napi, int budget)
10342{
10343 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10344 napi);
10345 struct bnx2x *bp = fp->bp;
10346 int work_done = 0;
10347
10348#ifdef BNX2X_STOP_ON_ERROR
10349 if (unlikely(bp->panic))
34f80b04 10350 goto poll_panic;
a2fbb9ea
ET
10351#endif
10352
a2fbb9ea
ET
10353 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10354 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10355
10356 bnx2x_update_fpsb_idx(fp);
10357
8534f32c 10358 if (bnx2x_has_rx_work(fp)) {
a2fbb9ea 10359 work_done = bnx2x_rx_int(fp, budget);
356e2385 10360
8534f32c
EG
10361 /* must not complete if we consumed full budget */
10362 if (work_done >= budget)
10363 goto poll_again;
10364 }
a2fbb9ea 10365
ca00392c 10366 /* bnx2x_has_rx_work() reads the status block, thus we need to
8534f32c 10367 * ensure that status block indices have been actually read
ca00392c 10368 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
8534f32c 10369 * so that we won't write the "newer" value of the status block to IGU
ca00392c 10370 * (if there was a DMA right after bnx2x_has_rx_work and
8534f32c
EG
10371 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10372 * may be postponed to right before bnx2x_ack_sb). In this case
10373 * there will never be another interrupt until there is another update
10374 * of the status block, while there is still unhandled work.
10375 */
10376 rmb();
a2fbb9ea 10377
ca00392c 10378 if (!bnx2x_has_rx_work(fp)) {
a2fbb9ea 10379#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10380poll_panic:
a2fbb9ea 10381#endif
288379f0 10382 napi_complete(napi);
a2fbb9ea 10383
0626b899 10384 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 10385 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 10386 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
a2fbb9ea
ET
10387 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10388 }
356e2385 10389
8534f32c 10390poll_again:
a2fbb9ea
ET
10391 return work_done;
10392}
10393
755735eb
EG
10394
10395/* we split the first BD into headers and data BDs
33471629 10396 * to ease the pain of our fellow microcode engineers
755735eb
EG
10397 * we use one mapping for both BDs
10398 * So far this has only been observed to happen
10399 * in Other Operating Systems(TM)
10400 */
10401static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10402 struct bnx2x_fastpath *fp,
ca00392c
EG
10403 struct sw_tx_bd *tx_buf,
10404 struct eth_tx_start_bd **tx_bd, u16 hlen,
755735eb
EG
10405 u16 bd_prod, int nbd)
10406{
ca00392c 10407 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
755735eb
EG
10408 struct eth_tx_bd *d_tx_bd;
10409 dma_addr_t mapping;
10410 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10411
10412 /* first fix first BD */
10413 h_tx_bd->nbd = cpu_to_le16(nbd);
10414 h_tx_bd->nbytes = cpu_to_le16(hlen);
10415
10416 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10417 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10418 h_tx_bd->addr_lo, h_tx_bd->nbd);
10419
10420 /* now get a new data BD
10421 * (after the pbd) and fill it */
10422 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 10423 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
755735eb
EG
10424
10425 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10426 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10427
10428 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10429 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10430 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
ca00392c
EG
10431
10432 /* this marks the BD as one that has no individual mapping */
10433 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10434
755735eb
EG
10435 DP(NETIF_MSG_TX_QUEUED,
10436 "TSO split data size is %d (%x:%x)\n",
10437 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10438
ca00392c
EG
10439 /* update tx_bd */
10440 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
755735eb
EG
10441
10442 return bd_prod;
10443}
10444
10445static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10446{
10447 if (fix > 0)
10448 csum = (u16) ~csum_fold(csum_sub(csum,
10449 csum_partial(t_header - fix, fix, 0)));
10450
10451 else if (fix < 0)
10452 csum = (u16) ~csum_fold(csum_add(csum,
10453 csum_partial(t_header, -fix, 0)));
10454
10455 return swab16(csum);
10456}
10457
10458static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10459{
10460 u32 rc;
10461
10462 if (skb->ip_summed != CHECKSUM_PARTIAL)
10463 rc = XMIT_PLAIN;
10464
10465 else {
4781bfad 10466 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
10467 rc = XMIT_CSUM_V6;
10468 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10469 rc |= XMIT_CSUM_TCP;
10470
10471 } else {
10472 rc = XMIT_CSUM_V4;
10473 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10474 rc |= XMIT_CSUM_TCP;
10475 }
10476 }
10477
10478 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10479 rc |= XMIT_GSO_V4;
10480
10481 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10482 rc |= XMIT_GSO_V6;
10483
10484 return rc;
10485}
10486
632da4d6 10487#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10488/* check if packet requires linearization (packet is too fragmented)
10489 no need to check fragmentation if page size > 8K (there will be no
10490 violation to FW restrictions) */
755735eb
EG
10491static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10492 u32 xmit_type)
10493{
10494 int to_copy = 0;
10495 int hlen = 0;
10496 int first_bd_sz = 0;
10497
10498 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10499 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10500
10501 if (xmit_type & XMIT_GSO) {
10502 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10503 /* Check if LSO packet needs to be copied:
10504 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10505 int wnd_size = MAX_FETCH_BD - 3;
33471629 10506 /* Number of windows to check */
755735eb
EG
10507 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10508 int wnd_idx = 0;
10509 int frag_idx = 0;
10510 u32 wnd_sum = 0;
10511
10512 /* Headers length */
10513 hlen = (int)(skb_transport_header(skb) - skb->data) +
10514 tcp_hdrlen(skb);
10515
10516 /* Amount of data (w/o headers) on linear part of SKB*/
10517 first_bd_sz = skb_headlen(skb) - hlen;
10518
10519 wnd_sum = first_bd_sz;
10520
10521 /* Calculate the first sum - it's special */
10522 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10523 wnd_sum +=
10524 skb_shinfo(skb)->frags[frag_idx].size;
10525
10526 /* If there was data on linear skb data - check it */
10527 if (first_bd_sz > 0) {
10528 if (unlikely(wnd_sum < lso_mss)) {
10529 to_copy = 1;
10530 goto exit_lbl;
10531 }
10532
10533 wnd_sum -= first_bd_sz;
10534 }
10535
10536 /* Others are easier: run through the frag list and
10537 check all windows */
10538 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10539 wnd_sum +=
10540 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10541
10542 if (unlikely(wnd_sum < lso_mss)) {
10543 to_copy = 1;
10544 break;
10545 }
10546 wnd_sum -=
10547 skb_shinfo(skb)->frags[wnd_idx].size;
10548 }
755735eb
EG
10549 } else {
10550 /* in non-LSO too fragmented packet should always
10551 be linearized */
10552 to_copy = 1;
10553 }
10554 }
10555
10556exit_lbl:
10557 if (unlikely(to_copy))
10558 DP(NETIF_MSG_TX_QUEUED,
10559 "Linearization IS REQUIRED for %s packet. "
10560 "num_frags %d hlen %d first_bd_sz %d\n",
10561 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10562 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10563
10564 return to_copy;
10565}
632da4d6 10566#endif
755735eb
EG
10567
10568/* called with netif_tx_lock
a2fbb9ea 10569 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10570 * netif_wake_queue()
a2fbb9ea
ET
10571 */
10572static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10573{
10574 struct bnx2x *bp = netdev_priv(dev);
ca00392c 10575 struct bnx2x_fastpath *fp, *fp_stat;
555f6c78 10576 struct netdev_queue *txq;
a2fbb9ea 10577 struct sw_tx_bd *tx_buf;
ca00392c
EG
10578 struct eth_tx_start_bd *tx_start_bd;
10579 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
a2fbb9ea
ET
10580 struct eth_tx_parse_bd *pbd = NULL;
10581 u16 pkt_prod, bd_prod;
755735eb 10582 int nbd, fp_index;
a2fbb9ea 10583 dma_addr_t mapping;
755735eb 10584 u32 xmit_type = bnx2x_xmit_type(bp, skb);
755735eb
EG
10585 int i;
10586 u8 hlen = 0;
ca00392c 10587 __le16 pkt_size = 0;
a2fbb9ea
ET
10588
10589#ifdef BNX2X_STOP_ON_ERROR
10590 if (unlikely(bp->panic))
10591 return NETDEV_TX_BUSY;
10592#endif
10593
555f6c78
EG
10594 fp_index = skb_get_queue_mapping(skb);
10595 txq = netdev_get_tx_queue(dev, fp_index);
10596
ca00392c
EG
10597 fp = &bp->fp[fp_index + bp->num_rx_queues];
10598 fp_stat = &bp->fp[fp_index];
755735eb 10599
231fd58a 10600 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
ca00392c 10601 fp_stat->eth_q_stats.driver_xoff++;
555f6c78 10602 netif_tx_stop_queue(txq);
a2fbb9ea
ET
10603 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10604 return NETDEV_TX_BUSY;
10605 }
10606
755735eb
EG
10607 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10608 " gso type %x xmit_type %x\n",
10609 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10610 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10611
632da4d6 10612#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10613 /* First, check if we need to linearize the skb (due to FW
10614 restrictions). No need to check fragmentation if page size > 8K
10615 (there will be no violation to FW restrictions) */
755735eb
EG
10616 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10617 /* Statistics of linearization */
10618 bp->lin_cnt++;
10619 if (skb_linearize(skb) != 0) {
10620 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10621 "silently dropping this SKB\n");
10622 dev_kfree_skb_any(skb);
da5a662a 10623 return NETDEV_TX_OK;
755735eb
EG
10624 }
10625 }
632da4d6 10626#endif
755735eb 10627
a2fbb9ea 10628 /*
755735eb 10629 Please read carefully. First we use one BD which we mark as start,
ca00392c 10630 then we have a parsing info BD (used for TSO or xsum),
755735eb 10631 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
10632 (don't forget to mark the last one as last,
10633 and to unmap only AFTER you write to the BD ...)
755735eb 10634 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
10635 */
10636
10637 pkt_prod = fp->tx_pkt_prod++;
755735eb 10638 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10639
755735eb 10640 /* get a tx_buf and first BD */
a2fbb9ea 10641 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 10642 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 10643
ca00392c
EG
10644 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10645 tx_start_bd->general_data = (UNICAST_ADDRESS <<
10646 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 10647 /* header nbd */
ca00392c 10648 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10649
755735eb
EG
10650 /* remember the first BD of the packet */
10651 tx_buf->first_bd = fp->tx_bd_prod;
10652 tx_buf->skb = skb;
ca00392c 10653 tx_buf->flags = 0;
a2fbb9ea
ET
10654
10655 DP(NETIF_MSG_TX_QUEUED,
10656 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 10657 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 10658
0c6671b0
EG
10659#ifdef BCM_VLAN
10660 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10661 (bp->flags & HW_VLAN_TX_FLAG)) {
ca00392c
EG
10662 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10663 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 10664 } else
0c6671b0 10665#endif
ca00392c 10666 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10667
ca00392c
EG
10668 /* turn on parsing and get a BD */
10669 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10670 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 10671
ca00392c 10672 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
755735eb
EG
10673
10674 if (xmit_type & XMIT_CSUM) {
ca00392c 10675 hlen = (skb_network_header(skb) - skb->data) / 2;
a2fbb9ea
ET
10676
10677 /* for now NS flag is not used in Linux */
4781bfad
EG
10678 pbd->global_data =
10679 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10680 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 10681
755735eb
EG
10682 pbd->ip_hlen = (skb_transport_header(skb) -
10683 skb_network_header(skb)) / 2;
10684
10685 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 10686
755735eb 10687 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 10688 hlen = hlen*2;
a2fbb9ea 10689
ca00392c 10690 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
755735eb
EG
10691
10692 if (xmit_type & XMIT_CSUM_V4)
ca00392c 10693 tx_start_bd->bd_flags.as_bitfield |=
755735eb
EG
10694 ETH_TX_BD_FLAGS_IP_CSUM;
10695 else
ca00392c
EG
10696 tx_start_bd->bd_flags.as_bitfield |=
10697 ETH_TX_BD_FLAGS_IPV6;
755735eb
EG
10698
10699 if (xmit_type & XMIT_CSUM_TCP) {
10700 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10701
10702 } else {
10703 s8 fix = SKB_CS_OFF(skb); /* signed! */
10704
ca00392c 10705 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 10706
755735eb 10707 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
10708 "hlen %d fix %d csum before fix %x\n",
10709 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
755735eb
EG
10710
10711 /* HW bug: fixup the CSUM */
10712 pbd->tcp_pseudo_csum =
10713 bnx2x_csum_fix(skb_transport_header(skb),
10714 SKB_CS(skb), fix);
10715
10716 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10717 pbd->tcp_pseudo_csum);
10718 }
a2fbb9ea
ET
10719 }
10720
10721 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 10722 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea 10723
ca00392c
EG
10724 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10725 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10726 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
10727 tx_start_bd->nbd = cpu_to_le16(nbd);
10728 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10729 pkt_size = tx_start_bd->nbytes;
a2fbb9ea
ET
10730
10731 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 10732 " nbytes %d flags %x vlan %x\n",
ca00392c
EG
10733 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
10734 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
10735 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 10736
755735eb 10737 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
10738
10739 DP(NETIF_MSG_TX_QUEUED,
10740 "TSO packet len %d hlen %d total len %d tso size %d\n",
10741 skb->len, hlen, skb_headlen(skb),
10742 skb_shinfo(skb)->gso_size);
10743
ca00392c 10744 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 10745
755735eb 10746 if (unlikely(skb_headlen(skb) > hlen))
ca00392c
EG
10747 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
10748 hlen, bd_prod, ++nbd);
a2fbb9ea
ET
10749
10750 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10751 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
10752 pbd->tcp_flags = pbd_tcp_flags(skb);
10753
10754 if (xmit_type & XMIT_GSO_V4) {
10755 pbd->ip_id = swab16(ip_hdr(skb)->id);
10756 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
10757 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10758 ip_hdr(skb)->daddr,
10759 0, IPPROTO_TCP, 0));
755735eb
EG
10760
10761 } else
10762 pbd->tcp_pseudo_csum =
10763 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10764 &ipv6_hdr(skb)->daddr,
10765 0, IPPROTO_TCP, 0));
10766
a2fbb9ea
ET
10767 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10768 }
ca00392c 10769 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 10770
755735eb
EG
10771 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10772 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 10773
755735eb 10774 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c
EG
10775 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10776 if (total_pkt_bd == NULL)
10777 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 10778
755735eb
EG
10779 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10780 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 10781
ca00392c
EG
10782 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10783 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10784 tx_data_bd->nbytes = cpu_to_le16(frag->size);
10785 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 10786
755735eb 10787 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
10788 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
10789 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
10790 le16_to_cpu(tx_data_bd->nbytes));
a2fbb9ea
ET
10791 }
10792
ca00392c 10793 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 10794
a2fbb9ea
ET
10795 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10796
755735eb 10797 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
10798 * if the packet contains or ends with it
10799 */
10800 if (TX_BD_POFF(bd_prod) < nbd)
10801 nbd++;
10802
ca00392c
EG
10803 if (total_pkt_bd != NULL)
10804 total_pkt_bd->total_pkt_bytes = pkt_size;
10805
a2fbb9ea
ET
10806 if (pbd)
10807 DP(NETIF_MSG_TX_QUEUED,
10808 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10809 " tcp_flags %x xsum %x seq %u hlen %u\n",
10810 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10811 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 10812 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 10813
755735eb 10814 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 10815
58f4c4cf
EG
10816 /*
10817 * Make sure that the BD data is updated before updating the producer
10818 * since FW might read the BD right after the producer is updated.
10819 * This is only applicable for weak-ordered memory model archs such
10820 * as IA-64. The following barrier is also mandatory since FW will
10821 * assumes packets must have BDs.
10822 */
10823 wmb();
10824
ca00392c
EG
10825 fp->tx_db.data.prod += nbd;
10826 barrier();
10827 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
a2fbb9ea
ET
10828
10829 mmiowb();
10830
755735eb 10831 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
10832
10833 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 10834 netif_tx_stop_queue(txq);
58f4c4cf
EG
10835 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10836 if we put Tx into XOFF state. */
10837 smp_mb();
ca00392c 10838 fp_stat->eth_q_stats.driver_xoff++;
a2fbb9ea 10839 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 10840 netif_tx_wake_queue(txq);
a2fbb9ea 10841 }
ca00392c 10842 fp_stat->tx_pkt++;
a2fbb9ea
ET
10843
10844 return NETDEV_TX_OK;
10845}
10846
bb2a0f7a 10847/* called with rtnl_lock */
a2fbb9ea
ET
10848static int bnx2x_open(struct net_device *dev)
10849{
10850 struct bnx2x *bp = netdev_priv(dev);
10851
6eccabb3
EG
10852 netif_carrier_off(dev);
10853
a2fbb9ea
ET
10854 bnx2x_set_power_state(bp, PCI_D0);
10855
bb2a0f7a 10856 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
10857}
10858
bb2a0f7a 10859/* called with rtnl_lock */
a2fbb9ea
ET
10860static int bnx2x_close(struct net_device *dev)
10861{
a2fbb9ea
ET
10862 struct bnx2x *bp = netdev_priv(dev);
10863
10864 /* Unload the driver, release IRQs */
bb2a0f7a
YG
10865 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10866 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10867 if (!CHIP_REV_IS_SLOW(bp))
10868 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
10869
10870 return 0;
10871}
10872
f5372251 10873/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
10874static void bnx2x_set_rx_mode(struct net_device *dev)
10875{
10876 struct bnx2x *bp = netdev_priv(dev);
10877 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10878 int port = BP_PORT(bp);
10879
10880 if (bp->state != BNX2X_STATE_OPEN) {
10881 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10882 return;
10883 }
10884
10885 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10886
10887 if (dev->flags & IFF_PROMISC)
10888 rx_mode = BNX2X_RX_MODE_PROMISC;
10889
10890 else if ((dev->flags & IFF_ALLMULTI) ||
10891 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10892 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10893
10894 else { /* some multicasts */
10895 if (CHIP_IS_E1(bp)) {
10896 int i, old, offset;
10897 struct dev_mc_list *mclist;
10898 struct mac_configuration_cmd *config =
10899 bnx2x_sp(bp, mcast_config);
10900
10901 for (i = 0, mclist = dev->mc_list;
10902 mclist && (i < dev->mc_count);
10903 i++, mclist = mclist->next) {
10904
10905 config->config_table[i].
10906 cam_entry.msb_mac_addr =
10907 swab16(*(u16 *)&mclist->dmi_addr[0]);
10908 config->config_table[i].
10909 cam_entry.middle_mac_addr =
10910 swab16(*(u16 *)&mclist->dmi_addr[2]);
10911 config->config_table[i].
10912 cam_entry.lsb_mac_addr =
10913 swab16(*(u16 *)&mclist->dmi_addr[4]);
10914 config->config_table[i].cam_entry.flags =
10915 cpu_to_le16(port);
10916 config->config_table[i].
10917 target_table_entry.flags = 0;
ca00392c
EG
10918 config->config_table[i].target_table_entry.
10919 clients_bit_vector =
10920 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
10921 config->config_table[i].
10922 target_table_entry.vlan_id = 0;
10923
10924 DP(NETIF_MSG_IFUP,
10925 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10926 config->config_table[i].
10927 cam_entry.msb_mac_addr,
10928 config->config_table[i].
10929 cam_entry.middle_mac_addr,
10930 config->config_table[i].
10931 cam_entry.lsb_mac_addr);
10932 }
8d9c5f34 10933 old = config->hdr.length;
34f80b04
EG
10934 if (old > i) {
10935 for (; i < old; i++) {
10936 if (CAM_IS_INVALID(config->
10937 config_table[i])) {
af246401 10938 /* already invalidated */
34f80b04
EG
10939 break;
10940 }
10941 /* invalidate */
10942 CAM_INVALIDATE(config->
10943 config_table[i]);
10944 }
10945 }
10946
10947 if (CHIP_REV_IS_SLOW(bp))
10948 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10949 else
10950 offset = BNX2X_MAX_MULTICAST*(1 + port);
10951
8d9c5f34 10952 config->hdr.length = i;
34f80b04 10953 config->hdr.offset = offset;
8d9c5f34 10954 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
10955 config->hdr.reserved1 = 0;
10956
10957 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10958 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10959 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10960 0);
10961 } else { /* E1H */
10962 /* Accept one or more multicasts */
10963 struct dev_mc_list *mclist;
10964 u32 mc_filter[MC_HASH_SIZE];
10965 u32 crc, bit, regidx;
10966 int i;
10967
10968 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10969
10970 for (i = 0, mclist = dev->mc_list;
10971 mclist && (i < dev->mc_count);
10972 i++, mclist = mclist->next) {
10973
7c510e4b
JB
10974 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10975 mclist->dmi_addr);
34f80b04
EG
10976
10977 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10978 bit = (crc >> 24) & 0xff;
10979 regidx = bit >> 5;
10980 bit &= 0x1f;
10981 mc_filter[regidx] |= (1 << bit);
10982 }
10983
10984 for (i = 0; i < MC_HASH_SIZE; i++)
10985 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10986 mc_filter[i]);
10987 }
10988 }
10989
10990 bp->rx_mode = rx_mode;
10991 bnx2x_set_storm_rx_mode(bp);
10992}
10993
10994/* called with rtnl_lock */
a2fbb9ea
ET
10995static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10996{
10997 struct sockaddr *addr = p;
10998 struct bnx2x *bp = netdev_priv(dev);
10999
34f80b04 11000 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
11001 return -EINVAL;
11002
11003 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
11004 if (netif_running(dev)) {
11005 if (CHIP_IS_E1(bp))
3101c2bc 11006 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 11007 else
3101c2bc 11008 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 11009 }
a2fbb9ea
ET
11010
11011 return 0;
11012}
11013
c18487ee 11014/* called with rtnl_lock */
a2fbb9ea
ET
11015static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11016{
11017 struct mii_ioctl_data *data = if_mii(ifr);
11018 struct bnx2x *bp = netdev_priv(dev);
3196a88a 11019 int port = BP_PORT(bp);
a2fbb9ea
ET
11020 int err;
11021
11022 switch (cmd) {
11023 case SIOCGMIIPHY:
34f80b04 11024 data->phy_id = bp->port.phy_addr;
a2fbb9ea 11025
c14423fe 11026 /* fallthrough */
c18487ee 11027
a2fbb9ea 11028 case SIOCGMIIREG: {
c18487ee 11029 u16 mii_regval;
a2fbb9ea 11030
c18487ee
YR
11031 if (!netif_running(dev))
11032 return -EAGAIN;
a2fbb9ea 11033
34f80b04 11034 mutex_lock(&bp->port.phy_mutex);
3196a88a 11035 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
11036 DEFAULT_PHY_DEV_ADDR,
11037 (data->reg_num & 0x1f), &mii_regval);
11038 data->val_out = mii_regval;
34f80b04 11039 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
11040 return err;
11041 }
11042
11043 case SIOCSMIIREG:
11044 if (!capable(CAP_NET_ADMIN))
11045 return -EPERM;
11046
c18487ee
YR
11047 if (!netif_running(dev))
11048 return -EAGAIN;
11049
34f80b04 11050 mutex_lock(&bp->port.phy_mutex);
3196a88a 11051 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
11052 DEFAULT_PHY_DEV_ADDR,
11053 (data->reg_num & 0x1f), data->val_in);
34f80b04 11054 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
11055 return err;
11056
11057 default:
11058 /* do nothing */
11059 break;
11060 }
11061
11062 return -EOPNOTSUPP;
11063}
11064
34f80b04 11065/* called with rtnl_lock */
a2fbb9ea
ET
11066static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11067{
11068 struct bnx2x *bp = netdev_priv(dev);
34f80b04 11069 int rc = 0;
a2fbb9ea
ET
11070
11071 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11072 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11073 return -EINVAL;
11074
11075 /* This does not race with packet allocation
c14423fe 11076 * because the actual alloc size is
a2fbb9ea
ET
11077 * only updated as part of load
11078 */
11079 dev->mtu = new_mtu;
11080
11081 if (netif_running(dev)) {
34f80b04
EG
11082 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11083 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 11084 }
34f80b04
EG
11085
11086 return rc;
a2fbb9ea
ET
11087}
11088
11089static void bnx2x_tx_timeout(struct net_device *dev)
11090{
11091 struct bnx2x *bp = netdev_priv(dev);
11092
11093#ifdef BNX2X_STOP_ON_ERROR
11094 if (!bp->panic)
11095 bnx2x_panic();
11096#endif
11097 /* This allows the netif to be shutdown gracefully before resetting */
11098 schedule_work(&bp->reset_task);
11099}
11100
11101#ifdef BCM_VLAN
34f80b04 11102/* called with rtnl_lock */
a2fbb9ea
ET
11103static void bnx2x_vlan_rx_register(struct net_device *dev,
11104 struct vlan_group *vlgrp)
11105{
11106 struct bnx2x *bp = netdev_priv(dev);
11107
11108 bp->vlgrp = vlgrp;
0c6671b0
EG
11109
11110 /* Set flags according to the required capabilities */
11111 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11112
11113 if (dev->features & NETIF_F_HW_VLAN_TX)
11114 bp->flags |= HW_VLAN_TX_FLAG;
11115
11116 if (dev->features & NETIF_F_HW_VLAN_RX)
11117 bp->flags |= HW_VLAN_RX_FLAG;
11118
a2fbb9ea 11119 if (netif_running(dev))
49d66772 11120 bnx2x_set_client_config(bp);
a2fbb9ea 11121}
34f80b04 11122
a2fbb9ea
ET
11123#endif
11124
11125#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11126static void poll_bnx2x(struct net_device *dev)
11127{
11128 struct bnx2x *bp = netdev_priv(dev);
11129
11130 disable_irq(bp->pdev->irq);
11131 bnx2x_interrupt(bp->pdev->irq, dev);
11132 enable_irq(bp->pdev->irq);
11133}
11134#endif
11135
c64213cd
SH
11136static const struct net_device_ops bnx2x_netdev_ops = {
11137 .ndo_open = bnx2x_open,
11138 .ndo_stop = bnx2x_close,
11139 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 11140 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
11141 .ndo_set_mac_address = bnx2x_change_mac_addr,
11142 .ndo_validate_addr = eth_validate_addr,
11143 .ndo_do_ioctl = bnx2x_ioctl,
11144 .ndo_change_mtu = bnx2x_change_mtu,
11145 .ndo_tx_timeout = bnx2x_tx_timeout,
11146#ifdef BCM_VLAN
11147 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11148#endif
11149#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11150 .ndo_poll_controller = poll_bnx2x,
11151#endif
11152};
11153
34f80b04
EG
11154static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11155 struct net_device *dev)
a2fbb9ea
ET
11156{
11157 struct bnx2x *bp;
11158 int rc;
11159
11160 SET_NETDEV_DEV(dev, &pdev->dev);
11161 bp = netdev_priv(dev);
11162
34f80b04
EG
11163 bp->dev = dev;
11164 bp->pdev = pdev;
a2fbb9ea 11165 bp->flags = 0;
34f80b04 11166 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
11167
11168 rc = pci_enable_device(pdev);
11169 if (rc) {
11170 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11171 goto err_out;
11172 }
11173
11174 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11175 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11176 " aborting\n");
11177 rc = -ENODEV;
11178 goto err_out_disable;
11179 }
11180
11181 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11182 printk(KERN_ERR PFX "Cannot find second PCI device"
11183 " base address, aborting\n");
11184 rc = -ENODEV;
11185 goto err_out_disable;
11186 }
11187
34f80b04
EG
11188 if (atomic_read(&pdev->enable_cnt) == 1) {
11189 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11190 if (rc) {
11191 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11192 " aborting\n");
11193 goto err_out_disable;
11194 }
a2fbb9ea 11195
34f80b04
EG
11196 pci_set_master(pdev);
11197 pci_save_state(pdev);
11198 }
a2fbb9ea
ET
11199
11200 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11201 if (bp->pm_cap == 0) {
11202 printk(KERN_ERR PFX "Cannot find power management"
11203 " capability, aborting\n");
11204 rc = -EIO;
11205 goto err_out_release;
11206 }
11207
11208 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11209 if (bp->pcie_cap == 0) {
11210 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11211 " aborting\n");
11212 rc = -EIO;
11213 goto err_out_release;
11214 }
11215
6a35528a 11216 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 11217 bp->flags |= USING_DAC_FLAG;
6a35528a 11218 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
a2fbb9ea
ET
11219 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11220 " failed, aborting\n");
11221 rc = -EIO;
11222 goto err_out_release;
11223 }
11224
284901a9 11225 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
a2fbb9ea
ET
11226 printk(KERN_ERR PFX "System does not support DMA,"
11227 " aborting\n");
11228 rc = -EIO;
11229 goto err_out_release;
11230 }
11231
34f80b04
EG
11232 dev->mem_start = pci_resource_start(pdev, 0);
11233 dev->base_addr = dev->mem_start;
11234 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
11235
11236 dev->irq = pdev->irq;
11237
275f165f 11238 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
11239 if (!bp->regview) {
11240 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11241 rc = -ENOMEM;
11242 goto err_out_release;
11243 }
11244
34f80b04
EG
11245 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11246 min_t(u64, BNX2X_DB_SIZE,
11247 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
11248 if (!bp->doorbells) {
11249 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11250 rc = -ENOMEM;
11251 goto err_out_unmap;
11252 }
11253
11254 bnx2x_set_power_state(bp, PCI_D0);
11255
34f80b04
EG
11256 /* clean indirect addresses */
11257 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11258 PCICFG_VENDOR_ID_OFFSET);
11259 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11260 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11261 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11262 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 11263
34f80b04 11264 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 11265
c64213cd 11266 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 11267 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
11268 dev->features |= NETIF_F_SG;
11269 dev->features |= NETIF_F_HW_CSUM;
11270 if (bp->flags & USING_DAC_FLAG)
11271 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
11272 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11273 dev->features |= NETIF_F_TSO6;
34f80b04
EG
11274#ifdef BCM_VLAN
11275 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 11276 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
11277
11278 dev->vlan_features |= NETIF_F_SG;
11279 dev->vlan_features |= NETIF_F_HW_CSUM;
11280 if (bp->flags & USING_DAC_FLAG)
11281 dev->vlan_features |= NETIF_F_HIGHDMA;
11282 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11283 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 11284#endif
a2fbb9ea
ET
11285
11286 return 0;
11287
11288err_out_unmap:
11289 if (bp->regview) {
11290 iounmap(bp->regview);
11291 bp->regview = NULL;
11292 }
a2fbb9ea
ET
11293 if (bp->doorbells) {
11294 iounmap(bp->doorbells);
11295 bp->doorbells = NULL;
11296 }
11297
11298err_out_release:
34f80b04
EG
11299 if (atomic_read(&pdev->enable_cnt) == 1)
11300 pci_release_regions(pdev);
a2fbb9ea
ET
11301
11302err_out_disable:
11303 pci_disable_device(pdev);
11304 pci_set_drvdata(pdev, NULL);
11305
11306err_out:
11307 return rc;
11308}
11309
25047950
ET
11310static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11311{
11312 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11313
11314 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11315 return val;
11316}
11317
11318/* return value of 1=2.5GHz 2=5GHz */
11319static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11320{
11321 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11322
11323 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11324 return val;
11325}
94a78b79
VZ
11326static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11327{
11328 struct bnx2x_fw_file_hdr *fw_hdr;
11329 struct bnx2x_fw_file_section *sections;
11330 u16 *ops_offsets;
11331 u32 offset, len, num_ops;
11332 int i;
11333 const struct firmware *firmware = bp->firmware;
11334 const u8 * fw_ver;
11335
11336 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11337 return -EINVAL;
11338
11339 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11340 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11341
11342 /* Make sure none of the offsets and sizes make us read beyond
11343 * the end of the firmware data */
11344 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11345 offset = be32_to_cpu(sections[i].offset);
11346 len = be32_to_cpu(sections[i].len);
11347 if (offset + len > firmware->size) {
11348 printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11349 return -EINVAL;
11350 }
11351 }
11352
11353 /* Likewise for the init_ops offsets */
11354 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11355 ops_offsets = (u16 *)(firmware->data + offset);
11356 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11357
11358 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11359 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11360 printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11361 return -EINVAL;
11362 }
11363 }
11364
11365 /* Check FW version */
11366 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11367 fw_ver = firmware->data + offset;
11368 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11369 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11370 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11371 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11372 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11373 " Should be %d.%d.%d.%d\n",
11374 fw_ver[0], fw_ver[1], fw_ver[2],
11375 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11376 BCM_5710_FW_MINOR_VERSION,
11377 BCM_5710_FW_REVISION_VERSION,
11378 BCM_5710_FW_ENGINEERING_VERSION);
11379 return -EINVAL;
11380 }
11381
11382 return 0;
11383}
11384
11385static void inline be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11386{
11387 u32 i;
11388 const __be32 *source = (const __be32*)_source;
11389 u32 *target = (u32*)_target;
11390
11391 for (i = 0; i < n/4; i++)
11392 target[i] = be32_to_cpu(source[i]);
11393}
11394
11395/*
11396 Ops array is stored in the following format:
11397 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11398 */
11399static void inline bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11400{
11401 u32 i, j, tmp;
11402 const __be32 *source = (const __be32*)_source;
11403 struct raw_op *target = (struct raw_op*)_target;
11404
11405 for (i = 0, j = 0; i < n/8; i++, j+=2) {
11406 tmp = be32_to_cpu(source[j]);
11407 target[i].op = (tmp >> 24) & 0xff;
11408 target[i].offset = tmp & 0xffffff;
11409 target[i].raw_data = be32_to_cpu(source[j+1]);
11410 }
11411}
11412static void inline be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11413{
11414 u32 i;
11415 u16 *target = (u16*)_target;
11416 const __be16 *source = (const __be16*)_source;
11417
11418 for (i = 0; i < n/2; i++)
11419 target[i] = be16_to_cpu(source[i]);
11420}
11421
11422#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11423 do { \
11424 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11425 bp->arr = kmalloc(len, GFP_KERNEL); \
11426 if (!bp->arr) { \
11427 printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11428 goto lbl; \
11429 } \
11430 func(bp->firmware->data + \
11431 be32_to_cpu(fw_hdr->arr.offset), \
11432 (u8*)bp->arr, len); \
11433 } while (0)
11434
11435
11436static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11437{
11438 char fw_file_name[40] = {0};
11439 int rc, offset;
11440 struct bnx2x_fw_file_hdr *fw_hdr;
11441
11442 /* Create a FW file name */
11443 if (CHIP_IS_E1(bp))
11444 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11445 else
11446 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11447
11448 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11449 BCM_5710_FW_MAJOR_VERSION,
11450 BCM_5710_FW_MINOR_VERSION,
11451 BCM_5710_FW_REVISION_VERSION,
11452 BCM_5710_FW_ENGINEERING_VERSION);
11453
11454 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11455
11456 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11457 if (rc) {
11458 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11459 goto request_firmware_exit;
11460 }
11461
11462 rc = bnx2x_check_firmware(bp);
11463 if (rc) {
11464 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11465 goto request_firmware_exit;
11466 }
11467
11468 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11469
11470 /* Initialize the pointers to the init arrays */
11471 /* Blob */
11472 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11473
11474 /* Opcodes */
11475 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11476
11477 /* Offsets */
11478 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11479
11480 /* STORMs firmware */
11481 bp->tsem_int_table_data = bp->firmware->data +
11482 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11483 bp->tsem_pram_data = bp->firmware->data +
11484 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11485 bp->usem_int_table_data = bp->firmware->data +
11486 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11487 bp->usem_pram_data = bp->firmware->data +
11488 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11489 bp->xsem_int_table_data = bp->firmware->data +
11490 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11491 bp->xsem_pram_data = bp->firmware->data +
11492 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11493 bp->csem_int_table_data = bp->firmware->data +
11494 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11495 bp->csem_pram_data = bp->firmware->data +
11496 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11497
11498 return 0;
11499init_offsets_alloc_err:
11500 kfree(bp->init_ops);
11501init_ops_alloc_err:
11502 kfree(bp->init_data);
11503request_firmware_exit:
11504 release_firmware(bp->firmware);
11505
11506 return rc;
11507}
11508
11509
25047950 11510
a2fbb9ea
ET
11511static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11512 const struct pci_device_id *ent)
11513{
11514 static int version_printed;
11515 struct net_device *dev = NULL;
11516 struct bnx2x *bp;
25047950 11517 int rc;
a2fbb9ea
ET
11518
11519 if (version_printed++ == 0)
11520 printk(KERN_INFO "%s", version);
11521
11522 /* dev zeroed in init_etherdev */
555f6c78 11523 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04
EG
11524 if (!dev) {
11525 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 11526 return -ENOMEM;
34f80b04 11527 }
a2fbb9ea 11528
a2fbb9ea
ET
11529 bp = netdev_priv(dev);
11530 bp->msglevel = debug;
11531
34f80b04 11532 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
11533 if (rc < 0) {
11534 free_netdev(dev);
11535 return rc;
11536 }
11537
a2fbb9ea
ET
11538 pci_set_drvdata(pdev, dev);
11539
34f80b04 11540 rc = bnx2x_init_bp(bp);
693fc0d1
EG
11541 if (rc)
11542 goto init_one_exit;
11543
94a78b79
VZ
11544 /* Set init arrays */
11545 rc = bnx2x_init_firmware(bp, &pdev->dev);
11546 if (rc) {
11547 printk(KERN_ERR PFX "Error loading firmware\n");
11548 goto init_one_exit;
11549 }
11550
693fc0d1 11551 rc = register_netdev(dev);
34f80b04 11552 if (rc) {
693fc0d1 11553 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
11554 goto init_one_exit;
11555 }
11556
25047950 11557 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
87942b46 11558 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
34f80b04 11559 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
11560 bnx2x_get_pcie_width(bp),
11561 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11562 dev->base_addr, bp->pdev->irq);
e174961c 11563 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
c016201c 11564
a2fbb9ea 11565 return 0;
34f80b04
EG
11566
11567init_one_exit:
11568 if (bp->regview)
11569 iounmap(bp->regview);
11570
11571 if (bp->doorbells)
11572 iounmap(bp->doorbells);
11573
11574 free_netdev(dev);
11575
11576 if (atomic_read(&pdev->enable_cnt) == 1)
11577 pci_release_regions(pdev);
11578
11579 pci_disable_device(pdev);
11580 pci_set_drvdata(pdev, NULL);
11581
11582 return rc;
a2fbb9ea
ET
11583}
11584
11585static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11586{
11587 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
11588 struct bnx2x *bp;
11589
11590 if (!dev) {
228241eb
ET
11591 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11592 return;
11593 }
228241eb 11594 bp = netdev_priv(dev);
a2fbb9ea 11595
a2fbb9ea
ET
11596 unregister_netdev(dev);
11597
94a78b79
VZ
11598 kfree(bp->init_ops_offsets);
11599 kfree(bp->init_ops);
11600 kfree(bp->init_data);
11601 release_firmware(bp->firmware);
11602
a2fbb9ea
ET
11603 if (bp->regview)
11604 iounmap(bp->regview);
11605
11606 if (bp->doorbells)
11607 iounmap(bp->doorbells);
11608
11609 free_netdev(dev);
34f80b04
EG
11610
11611 if (atomic_read(&pdev->enable_cnt) == 1)
11612 pci_release_regions(pdev);
11613
a2fbb9ea
ET
11614 pci_disable_device(pdev);
11615 pci_set_drvdata(pdev, NULL);
11616}
11617
11618static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11619{
11620 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
11621 struct bnx2x *bp;
11622
34f80b04
EG
11623 if (!dev) {
11624 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11625 return -ENODEV;
11626 }
11627 bp = netdev_priv(dev);
a2fbb9ea 11628
34f80b04 11629 rtnl_lock();
a2fbb9ea 11630
34f80b04 11631 pci_save_state(pdev);
228241eb 11632
34f80b04
EG
11633 if (!netif_running(dev)) {
11634 rtnl_unlock();
11635 return 0;
11636 }
a2fbb9ea
ET
11637
11638 netif_device_detach(dev);
a2fbb9ea 11639
da5a662a 11640 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 11641
a2fbb9ea 11642 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 11643
34f80b04
EG
11644 rtnl_unlock();
11645
a2fbb9ea
ET
11646 return 0;
11647}
11648
11649static int bnx2x_resume(struct pci_dev *pdev)
11650{
11651 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 11652 struct bnx2x *bp;
a2fbb9ea
ET
11653 int rc;
11654
228241eb
ET
11655 if (!dev) {
11656 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11657 return -ENODEV;
11658 }
228241eb 11659 bp = netdev_priv(dev);
a2fbb9ea 11660
34f80b04
EG
11661 rtnl_lock();
11662
228241eb 11663 pci_restore_state(pdev);
34f80b04
EG
11664
11665 if (!netif_running(dev)) {
11666 rtnl_unlock();
11667 return 0;
11668 }
11669
a2fbb9ea
ET
11670 bnx2x_set_power_state(bp, PCI_D0);
11671 netif_device_attach(dev);
11672
da5a662a 11673 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 11674
34f80b04
EG
11675 rtnl_unlock();
11676
11677 return rc;
a2fbb9ea
ET
11678}
11679
f8ef6e44
YG
11680static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11681{
11682 int i;
11683
11684 bp->state = BNX2X_STATE_ERROR;
11685
11686 bp->rx_mode = BNX2X_RX_MODE_NONE;
11687
11688 bnx2x_netif_stop(bp, 0);
11689
11690 del_timer_sync(&bp->timer);
11691 bp->stats_state = STATS_STATE_DISABLED;
11692 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11693
11694 /* Release IRQs */
11695 bnx2x_free_irq(bp);
11696
11697 if (CHIP_IS_E1(bp)) {
11698 struct mac_configuration_cmd *config =
11699 bnx2x_sp(bp, mcast_config);
11700
8d9c5f34 11701 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
11702 CAM_INVALIDATE(config->config_table[i]);
11703 }
11704
11705 /* Free SKBs, SGEs, TPA pool and driver internals */
11706 bnx2x_free_skbs(bp);
555f6c78 11707 for_each_rx_queue(bp, i)
f8ef6e44 11708 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 11709 for_each_rx_queue(bp, i)
7cde1c8b 11710 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
11711 bnx2x_free_mem(bp);
11712
11713 bp->state = BNX2X_STATE_CLOSED;
11714
11715 netif_carrier_off(bp->dev);
11716
11717 return 0;
11718}
11719
11720static void bnx2x_eeh_recover(struct bnx2x *bp)
11721{
11722 u32 val;
11723
11724 mutex_init(&bp->port.phy_mutex);
11725
11726 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11727 bp->link_params.shmem_base = bp->common.shmem_base;
11728 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11729
11730 if (!bp->common.shmem_base ||
11731 (bp->common.shmem_base < 0xA0000) ||
11732 (bp->common.shmem_base >= 0xC0000)) {
11733 BNX2X_DEV_INFO("MCP not active\n");
11734 bp->flags |= NO_MCP_FLAG;
11735 return;
11736 }
11737
11738 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11739 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11740 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11741 BNX2X_ERR("BAD MCP validity signature\n");
11742
11743 if (!BP_NOMCP(bp)) {
11744 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11745 & DRV_MSG_SEQ_NUMBER_MASK);
11746 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11747 }
11748}
11749
493adb1f
WX
11750/**
11751 * bnx2x_io_error_detected - called when PCI error is detected
11752 * @pdev: Pointer to PCI device
11753 * @state: The current pci connection state
11754 *
11755 * This function is called after a PCI bus error affecting
11756 * this device has been detected.
11757 */
11758static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11759 pci_channel_state_t state)
11760{
11761 struct net_device *dev = pci_get_drvdata(pdev);
11762 struct bnx2x *bp = netdev_priv(dev);
11763
11764 rtnl_lock();
11765
11766 netif_device_detach(dev);
11767
07ce50e4
DN
11768 if (state == pci_channel_io_perm_failure) {
11769 rtnl_unlock();
11770 return PCI_ERS_RESULT_DISCONNECT;
11771 }
11772
493adb1f 11773 if (netif_running(dev))
f8ef6e44 11774 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
11775
11776 pci_disable_device(pdev);
11777
11778 rtnl_unlock();
11779
11780 /* Request a slot reset */
11781 return PCI_ERS_RESULT_NEED_RESET;
11782}
11783
11784/**
11785 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11786 * @pdev: Pointer to PCI device
11787 *
11788 * Restart the card from scratch, as if from a cold-boot.
11789 */
11790static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11791{
11792 struct net_device *dev = pci_get_drvdata(pdev);
11793 struct bnx2x *bp = netdev_priv(dev);
11794
11795 rtnl_lock();
11796
11797 if (pci_enable_device(pdev)) {
11798 dev_err(&pdev->dev,
11799 "Cannot re-enable PCI device after reset\n");
11800 rtnl_unlock();
11801 return PCI_ERS_RESULT_DISCONNECT;
11802 }
11803
11804 pci_set_master(pdev);
11805 pci_restore_state(pdev);
11806
11807 if (netif_running(dev))
11808 bnx2x_set_power_state(bp, PCI_D0);
11809
11810 rtnl_unlock();
11811
11812 return PCI_ERS_RESULT_RECOVERED;
11813}
11814
11815/**
11816 * bnx2x_io_resume - called when traffic can start flowing again
11817 * @pdev: Pointer to PCI device
11818 *
11819 * This callback is called when the error recovery driver tells us that
11820 * its OK to resume normal operation.
11821 */
11822static void bnx2x_io_resume(struct pci_dev *pdev)
11823{
11824 struct net_device *dev = pci_get_drvdata(pdev);
11825 struct bnx2x *bp = netdev_priv(dev);
11826
11827 rtnl_lock();
11828
f8ef6e44
YG
11829 bnx2x_eeh_recover(bp);
11830
493adb1f 11831 if (netif_running(dev))
f8ef6e44 11832 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
11833
11834 netif_device_attach(dev);
11835
11836 rtnl_unlock();
11837}
11838
11839static struct pci_error_handlers bnx2x_err_handler = {
11840 .error_detected = bnx2x_io_error_detected,
356e2385
EG
11841 .slot_reset = bnx2x_io_slot_reset,
11842 .resume = bnx2x_io_resume,
493adb1f
WX
11843};
11844
a2fbb9ea 11845static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
11846 .name = DRV_MODULE_NAME,
11847 .id_table = bnx2x_pci_tbl,
11848 .probe = bnx2x_init_one,
11849 .remove = __devexit_p(bnx2x_remove_one),
11850 .suspend = bnx2x_suspend,
11851 .resume = bnx2x_resume,
11852 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
11853};
11854
11855static int __init bnx2x_init(void)
11856{
dd21ca6d
SG
11857 int ret;
11858
1cf167f2
EG
11859 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11860 if (bnx2x_wq == NULL) {
11861 printk(KERN_ERR PFX "Cannot create workqueue\n");
11862 return -ENOMEM;
11863 }
11864
dd21ca6d
SG
11865 ret = pci_register_driver(&bnx2x_pci_driver);
11866 if (ret) {
11867 printk(KERN_ERR PFX "Cannot register driver\n");
11868 destroy_workqueue(bnx2x_wq);
11869 }
11870 return ret;
a2fbb9ea
ET
11871}
11872
11873static void __exit bnx2x_cleanup(void)
11874{
11875 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
11876
11877 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
11878}
11879
11880module_init(bnx2x_init);
11881module_exit(bnx2x_cleanup);
11882
94a78b79 11883