]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/bnx2x_main.c
bnx2x: Refactor MAC address setup code.
[net-next-2.6.git] / drivers / net / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
d05c26ce 3 * Copyright (c) 2007-2009 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
ca00392c 13 * Slowpath and fastpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
0c6671b0 41#include <linux/if_vlan.h>
a2fbb9ea
ET
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
34f80b04 45#include <net/ip6_checksum.h>
a2fbb9ea
ET
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
34f80b04 48#include <linux/crc32c.h>
a2fbb9ea
ET
49#include <linux/prefetch.h>
50#include <linux/zlib.h>
a2fbb9ea
ET
51#include <linux/io.h>
52
359d8b15 53
a2fbb9ea
ET
54#include "bnx2x.h"
55#include "bnx2x_init.h"
94a78b79 56#include "bnx2x_init_ops.h"
0a64ea57 57#include "bnx2x_dump.h"
a2fbb9ea 58
c458bc50
EG
59#define DRV_MODULE_VERSION "1.52.1"
60#define DRV_MODULE_RELDATE "2009/08/12"
34f80b04 61#define BNX2X_BC_VER 0x040200
a2fbb9ea 62
94a78b79
VZ
63#include <linux/firmware.h>
64#include "bnx2x_fw_file_hdr.h"
65/* FW files */
ab6ad5a4
EG
66#define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67#define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
94a78b79 68
34f80b04
EG
69/* Time in jiffies before concluding the transmitter is hung */
70#define TX_TIMEOUT (5*HZ)
a2fbb9ea 71
53a10565 72static char version[] __devinitdata =
34f80b04 73 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
74 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
24e3fcef 76MODULE_AUTHOR("Eliezer Tamir");
e47d7e6e 77MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
a2fbb9ea
ET
78MODULE_LICENSE("GPL");
79MODULE_VERSION(DRV_MODULE_VERSION);
a2fbb9ea 80
555f6c78
EG
81static int multi_mode = 1;
82module_param(multi_mode, int, 0);
ca00392c
EG
83MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))");
85
86static int num_rx_queues;
87module_param(num_rx_queues, int, 0);
88MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89 " (default is half number of CPUs)");
90
91static int num_tx_queues;
92module_param(num_tx_queues, int, 0);
93MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
555f6c78 95
19680c48 96static int disable_tpa;
19680c48 97module_param(disable_tpa, int, 0);
9898f86d 98MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
8badd27a
EG
99
100static int int_mode;
101module_param(int_mode, int, 0);
102MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
103
a18f5128
EG
104static int dropless_fc;
105module_param(dropless_fc, int, 0);
106MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
107
9898f86d 108static int poll;
a2fbb9ea 109module_param(poll, int, 0);
9898f86d 110MODULE_PARM_DESC(poll, " Use polling (for debug)");
8d5726c4
EG
111
112static int mrrs = -1;
113module_param(mrrs, int, 0);
114MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
115
9898f86d 116static int debug;
a2fbb9ea 117module_param(debug, int, 0);
9898f86d
EG
118MODULE_PARM_DESC(debug, " Default debug msglevel");
119
120static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
a2fbb9ea 121
1cf167f2 122static struct workqueue_struct *bnx2x_wq;
a2fbb9ea
ET
123
124enum bnx2x_board_type {
125 BCM57710 = 0,
34f80b04
EG
126 BCM57711 = 1,
127 BCM57711E = 2,
a2fbb9ea
ET
128};
129
34f80b04 130/* indexed by board_type, above */
53a10565 131static struct {
a2fbb9ea
ET
132 char *name;
133} board_info[] __devinitdata = {
34f80b04
EG
134 { "Broadcom NetXtreme II BCM57710 XGb" },
135 { "Broadcom NetXtreme II BCM57711 XGb" },
136 { "Broadcom NetXtreme II BCM57711E XGb" }
a2fbb9ea
ET
137};
138
34f80b04 139
a2fbb9ea 140static const struct pci_device_id bnx2x_pci_tbl[] = {
e4ed7113
EG
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
143 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
a2fbb9ea
ET
144 { 0 }
145};
146
147MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
148
149/****************************************************************************
150* General service functions
151****************************************************************************/
152
153/* used only at init
154 * locking is done by mcp
155 */
573f2035 156void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
a2fbb9ea
ET
157{
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
159 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
160 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
161 PCICFG_VENDOR_ID_OFFSET);
162}
163
a2fbb9ea
ET
164static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
165{
166 u32 val;
167
168 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
169 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
170 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
171 PCICFG_VENDOR_ID_OFFSET);
172
173 return val;
174}
a2fbb9ea
ET
175
176static const u32 dmae_reg_go_c[] = {
177 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
178 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
179 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
180 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
181};
182
183/* copy command into DMAE command memory and set DMAE command go */
184static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
185 int idx)
186{
187 u32 cmd_offset;
188 int i;
189
190 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
191 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
192 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
193
ad8d3948
EG
194 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
195 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
196 }
197 REG_WR(bp, dmae_reg_go_c[idx], 1);
198}
199
ad8d3948
EG
200void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
201 u32 len32)
a2fbb9ea 202{
5ff7b6d4 203 struct dmae_command dmae;
a2fbb9ea 204 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
205 int cnt = 200;
206
207 if (!bp->dmae_ready) {
208 u32 *data = bnx2x_sp(bp, wb_data[0]);
209
210 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
211 " using indirect\n", dst_addr, len32);
212 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
213 return;
214 }
215
5ff7b6d4 216 memset(&dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 217
5ff7b6d4
EG
218 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
219 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
220 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 221#ifdef __BIG_ENDIAN
5ff7b6d4 222 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 223#else
5ff7b6d4 224 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 225#endif
5ff7b6d4
EG
226 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
227 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
228 dmae.src_addr_lo = U64_LO(dma_addr);
229 dmae.src_addr_hi = U64_HI(dma_addr);
230 dmae.dst_addr_lo = dst_addr >> 2;
231 dmae.dst_addr_hi = 0;
232 dmae.len = len32;
233 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
234 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
235 dmae.comp_val = DMAE_COMP_VAL;
a2fbb9ea 236
c3eefaf6 237 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
238 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
239 "dst_addr [%x:%08x (%08x)]\n"
240 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
5ff7b6d4
EG
241 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
242 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
243 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
ad8d3948 244 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
245 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
246 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea 247
5ff7b6d4
EG
248 mutex_lock(&bp->dmae_mutex);
249
a2fbb9ea
ET
250 *wb_comp = 0;
251
5ff7b6d4 252 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
253
254 udelay(5);
ad8d3948
EG
255
256 while (*wb_comp != DMAE_COMP_VAL) {
257 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
258
ad8d3948 259 if (!cnt) {
c3eefaf6 260 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
261 break;
262 }
ad8d3948 263 cnt--;
12469401
YG
264 /* adjust delay for emulation/FPGA */
265 if (CHIP_REV_IS_SLOW(bp))
266 msleep(100);
267 else
268 udelay(5);
a2fbb9ea 269 }
ad8d3948
EG
270
271 mutex_unlock(&bp->dmae_mutex);
a2fbb9ea
ET
272}
273
c18487ee 274void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 275{
5ff7b6d4 276 struct dmae_command dmae;
a2fbb9ea 277 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
278 int cnt = 200;
279
280 if (!bp->dmae_ready) {
281 u32 *data = bnx2x_sp(bp, wb_data[0]);
282 int i;
283
284 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
285 " using indirect\n", src_addr, len32);
286 for (i = 0; i < len32; i++)
287 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
288 return;
289 }
290
5ff7b6d4 291 memset(&dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 292
5ff7b6d4
EG
293 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
294 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
295 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 296#ifdef __BIG_ENDIAN
5ff7b6d4 297 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 298#else
5ff7b6d4 299 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 300#endif
5ff7b6d4
EG
301 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
302 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
303 dmae.src_addr_lo = src_addr >> 2;
304 dmae.src_addr_hi = 0;
305 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
306 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
307 dmae.len = len32;
308 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
309 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
310 dmae.comp_val = DMAE_COMP_VAL;
a2fbb9ea 311
c3eefaf6 312 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
313 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
314 "dst_addr [%x:%08x (%08x)]\n"
315 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
5ff7b6d4
EG
316 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
317 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
318 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
a2fbb9ea 319
5ff7b6d4
EG
320 mutex_lock(&bp->dmae_mutex);
321
322 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
a2fbb9ea
ET
323 *wb_comp = 0;
324
5ff7b6d4 325 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
326
327 udelay(5);
ad8d3948
EG
328
329 while (*wb_comp != DMAE_COMP_VAL) {
330
ad8d3948 331 if (!cnt) {
c3eefaf6 332 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
333 break;
334 }
ad8d3948 335 cnt--;
12469401
YG
336 /* adjust delay for emulation/FPGA */
337 if (CHIP_REV_IS_SLOW(bp))
338 msleep(100);
339 else
340 udelay(5);
a2fbb9ea 341 }
ad8d3948 342 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
343 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
344 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948
EG
345
346 mutex_unlock(&bp->dmae_mutex);
347}
348
573f2035
EG
349void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
350 u32 addr, u32 len)
351{
352 int offset = 0;
353
354 while (len > DMAE_LEN32_WR_MAX) {
355 bnx2x_write_dmae(bp, phys_addr + offset,
356 addr + offset, DMAE_LEN32_WR_MAX);
357 offset += DMAE_LEN32_WR_MAX * 4;
358 len -= DMAE_LEN32_WR_MAX;
359 }
360
361 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
362}
363
ad8d3948
EG
364/* used only for slowpath so not inlined */
365static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
366{
367 u32 wb_write[2];
368
369 wb_write[0] = val_hi;
370 wb_write[1] = val_lo;
371 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 372}
a2fbb9ea 373
ad8d3948
EG
374#ifdef USE_WB_RD
375static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
376{
377 u32 wb_data[2];
378
379 REG_RD_DMAE(bp, reg, wb_data, 2);
380
381 return HILO_U64(wb_data[0], wb_data[1]);
382}
383#endif
384
a2fbb9ea
ET
385static int bnx2x_mc_assert(struct bnx2x *bp)
386{
a2fbb9ea 387 char last_idx;
34f80b04
EG
388 int i, rc = 0;
389 u32 row0, row1, row2, row3;
390
391 /* XSTORM */
392 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
393 XSTORM_ASSERT_LIST_INDEX_OFFSET);
394 if (last_idx)
395 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
396
397 /* print the asserts */
398 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
399
400 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
401 XSTORM_ASSERT_LIST_OFFSET(i));
402 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
403 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
404 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
405 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
406 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
407 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
408
409 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
410 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
411 " 0x%08x 0x%08x 0x%08x\n",
412 i, row3, row2, row1, row0);
413 rc++;
414 } else {
415 break;
416 }
417 }
418
419 /* TSTORM */
420 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
421 TSTORM_ASSERT_LIST_INDEX_OFFSET);
422 if (last_idx)
423 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
424
425 /* print the asserts */
426 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
427
428 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
429 TSTORM_ASSERT_LIST_OFFSET(i));
430 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
431 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
432 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
433 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
434 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
435 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
436
437 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
438 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
439 " 0x%08x 0x%08x 0x%08x\n",
440 i, row3, row2, row1, row0);
441 rc++;
442 } else {
443 break;
444 }
445 }
446
447 /* CSTORM */
448 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
449 CSTORM_ASSERT_LIST_INDEX_OFFSET);
450 if (last_idx)
451 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
452
453 /* print the asserts */
454 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
455
456 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
457 CSTORM_ASSERT_LIST_OFFSET(i));
458 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
459 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
460 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
461 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
462 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
463 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
464
465 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
466 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
467 " 0x%08x 0x%08x 0x%08x\n",
468 i, row3, row2, row1, row0);
469 rc++;
470 } else {
471 break;
472 }
473 }
474
475 /* USTORM */
476 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
477 USTORM_ASSERT_LIST_INDEX_OFFSET);
478 if (last_idx)
479 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
480
481 /* print the asserts */
482 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
483
484 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
485 USTORM_ASSERT_LIST_OFFSET(i));
486 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
487 USTORM_ASSERT_LIST_OFFSET(i) + 4);
488 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
489 USTORM_ASSERT_LIST_OFFSET(i) + 8);
490 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
491 USTORM_ASSERT_LIST_OFFSET(i) + 12);
492
493 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
494 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
495 " 0x%08x 0x%08x 0x%08x\n",
496 i, row3, row2, row1, row0);
497 rc++;
498 } else {
499 break;
a2fbb9ea
ET
500 }
501 }
34f80b04 502
a2fbb9ea
ET
503 return rc;
504}
c14423fe 505
a2fbb9ea
ET
506static void bnx2x_fw_dump(struct bnx2x *bp)
507{
508 u32 mark, offset;
4781bfad 509 __be32 data[9];
a2fbb9ea
ET
510 int word;
511
512 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
49d66772 513 mark = ((mark + 0x3) & ~0x3);
ad361c98 514 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
a2fbb9ea 515
ad361c98 516 printk(KERN_ERR PFX);
a2fbb9ea
ET
517 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
518 for (word = 0; word < 8; word++)
519 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
520 offset + 4*word));
521 data[8] = 0x0;
49d66772 522 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
523 }
524 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
525 for (word = 0; word < 8; word++)
526 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
527 offset + 4*word));
528 data[8] = 0x0;
49d66772 529 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea 530 }
ad361c98 531 printk(KERN_ERR PFX "end of fw dump\n");
a2fbb9ea
ET
532}
533
534static void bnx2x_panic_dump(struct bnx2x *bp)
535{
536 int i;
537 u16 j, start, end;
538
66e855f3
YG
539 bp->stats_state = STATS_STATE_DISABLED;
540 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
541
a2fbb9ea
ET
542 BNX2X_ERR("begin crash dump -----------------\n");
543
8440d2b6
EG
544 /* Indices */
545 /* Common */
546 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
547 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
548 " spq_prod_idx(%u)\n",
549 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
550 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
551
552 /* Rx */
553 for_each_rx_queue(bp, i) {
a2fbb9ea 554 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 555
c3eefaf6 556 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
66e855f3
YG
557 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
558 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
8440d2b6 559 i, fp->rx_bd_prod, fp->rx_bd_cons,
66e855f3
YG
560 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
561 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
c3eefaf6 562 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
8440d2b6
EG
563 " fp_u_idx(%x) *sb_u_idx(%x)\n",
564 fp->rx_sge_prod, fp->last_max_sge,
565 le16_to_cpu(fp->fp_u_idx),
566 fp->status_blk->u_status_block.status_block_index);
567 }
a2fbb9ea 568
8440d2b6
EG
569 /* Tx */
570 for_each_tx_queue(bp, i) {
571 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 572
c3eefaf6 573 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
8440d2b6
EG
574 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
575 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
576 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
c3eefaf6 577 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
ca00392c 578 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
8440d2b6 579 fp->status_blk->c_status_block.status_block_index,
ca00392c 580 fp->tx_db.data.prod);
8440d2b6 581 }
a2fbb9ea 582
8440d2b6
EG
583 /* Rings */
584 /* Rx */
585 for_each_rx_queue(bp, i) {
586 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea
ET
587
588 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
589 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
8440d2b6 590 for (j = start; j != end; j = RX_BD(j + 1)) {
a2fbb9ea
ET
591 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
592 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
593
c3eefaf6
EG
594 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
595 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
596 }
597
3196a88a
EG
598 start = RX_SGE(fp->rx_sge_prod);
599 end = RX_SGE(fp->last_max_sge);
8440d2b6 600 for (j = start; j != end; j = RX_SGE(j + 1)) {
7a9b2557
VZ
601 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
602 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
603
c3eefaf6
EG
604 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
605 i, j, rx_sge[1], rx_sge[0], sw_page->page);
7a9b2557
VZ
606 }
607
a2fbb9ea
ET
608 start = RCQ_BD(fp->rx_comp_cons - 10);
609 end = RCQ_BD(fp->rx_comp_cons + 503);
8440d2b6 610 for (j = start; j != end; j = RCQ_BD(j + 1)) {
a2fbb9ea
ET
611 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
612
c3eefaf6
EG
613 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
614 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
a2fbb9ea
ET
615 }
616 }
617
8440d2b6
EG
618 /* Tx */
619 for_each_tx_queue(bp, i) {
620 struct bnx2x_fastpath *fp = &bp->fp[i];
621
622 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
623 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
624 for (j = start; j != end; j = TX_BD(j + 1)) {
625 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
626
c3eefaf6
EG
627 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
628 i, j, sw_bd->skb, sw_bd->first_bd);
8440d2b6
EG
629 }
630
631 start = TX_BD(fp->tx_bd_cons - 10);
632 end = TX_BD(fp->tx_bd_cons + 254);
633 for (j = start; j != end; j = TX_BD(j + 1)) {
634 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
635
c3eefaf6
EG
636 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
637 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
8440d2b6
EG
638 }
639 }
a2fbb9ea 640
34f80b04 641 bnx2x_fw_dump(bp);
a2fbb9ea
ET
642 bnx2x_mc_assert(bp);
643 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
644}
645
615f8fd9 646static void bnx2x_int_enable(struct bnx2x *bp)
a2fbb9ea 647{
34f80b04 648 int port = BP_PORT(bp);
a2fbb9ea
ET
649 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
650 u32 val = REG_RD(bp, addr);
651 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 652 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
a2fbb9ea
ET
653
654 if (msix) {
8badd27a
EG
655 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
656 HC_CONFIG_0_REG_INT_LINE_EN_0);
a2fbb9ea
ET
657 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
658 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
8badd27a
EG
659 } else if (msi) {
660 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
661 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
662 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
a2fbb9ea
ET
664 } else {
665 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 666 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
667 HC_CONFIG_0_REG_INT_LINE_EN_0 |
668 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 669
8badd27a
EG
670 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
671 val, port, addr);
615f8fd9
ET
672
673 REG_WR(bp, addr, val);
674
a2fbb9ea
ET
675 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
676 }
677
8badd27a
EG
678 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
679 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
a2fbb9ea
ET
680
681 REG_WR(bp, addr, val);
37dbbf32
EG
682 /*
683 * Ensure that HC_CONFIG is written before leading/trailing edge config
684 */
685 mmiowb();
686 barrier();
34f80b04
EG
687
688 if (CHIP_IS_E1H(bp)) {
689 /* init leading/trailing edge */
690 if (IS_E1HMF(bp)) {
8badd27a 691 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
34f80b04 692 if (bp->port.pmf)
4acac6a5
EG
693 /* enable nig and gpio3 attention */
694 val |= 0x1100;
34f80b04
EG
695 } else
696 val = 0xffff;
697
698 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
699 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
700 }
37dbbf32
EG
701
702 /* Make sure that interrupts are indeed enabled from here on */
703 mmiowb();
a2fbb9ea
ET
704}
705
615f8fd9 706static void bnx2x_int_disable(struct bnx2x *bp)
a2fbb9ea 707{
34f80b04 708 int port = BP_PORT(bp);
a2fbb9ea
ET
709 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
710 u32 val = REG_RD(bp, addr);
711
712 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
713 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
714 HC_CONFIG_0_REG_INT_LINE_EN_0 |
715 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
716
717 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
718 val, port, addr);
719
8badd27a
EG
720 /* flush all outstanding writes */
721 mmiowb();
722
a2fbb9ea
ET
723 REG_WR(bp, addr, val);
724 if (REG_RD(bp, addr) != val)
725 BNX2X_ERR("BUG! proper val not read from IGU!\n");
726}
727
f8ef6e44 728static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
a2fbb9ea 729{
a2fbb9ea 730 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 731 int i, offset;
a2fbb9ea 732
34f80b04 733 /* disable interrupt handling */
a2fbb9ea 734 atomic_inc(&bp->intr_sem);
e1510706
EG
735 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
736
f8ef6e44
YG
737 if (disable_hw)
738 /* prevent the HW from sending interrupts */
739 bnx2x_int_disable(bp);
a2fbb9ea
ET
740
741 /* make sure all ISRs are done */
742 if (msix) {
8badd27a
EG
743 synchronize_irq(bp->msix_table[0].vector);
744 offset = 1;
a2fbb9ea 745 for_each_queue(bp, i)
8badd27a 746 synchronize_irq(bp->msix_table[i + offset].vector);
a2fbb9ea
ET
747 } else
748 synchronize_irq(bp->pdev->irq);
749
750 /* make sure sp_task is not running */
1cf167f2
EG
751 cancel_delayed_work(&bp->sp_task);
752 flush_workqueue(bnx2x_wq);
a2fbb9ea
ET
753}
754
34f80b04 755/* fast path */
a2fbb9ea
ET
756
757/*
34f80b04 758 * General service functions
a2fbb9ea
ET
759 */
760
34f80b04 761static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
a2fbb9ea
ET
762 u8 storm, u16 index, u8 op, u8 update)
763{
5c862848
EG
764 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
765 COMMAND_REG_INT_ACK);
a2fbb9ea
ET
766 struct igu_ack_register igu_ack;
767
768 igu_ack.status_block_index = index;
769 igu_ack.sb_id_and_flags =
34f80b04 770 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
a2fbb9ea
ET
771 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
772 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
773 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
774
5c862848
EG
775 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
776 (*(u32 *)&igu_ack), hc_addr);
777 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
37dbbf32
EG
778
779 /* Make sure that ACK is written */
780 mmiowb();
781 barrier();
a2fbb9ea
ET
782}
783
784static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
785{
786 struct host_status_block *fpsb = fp->status_blk;
787 u16 rc = 0;
788
789 barrier(); /* status block is written to by the chip */
790 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
791 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
792 rc |= 1;
793 }
794 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
795 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
796 rc |= 2;
797 }
798 return rc;
799}
800
a2fbb9ea
ET
801static u16 bnx2x_ack_int(struct bnx2x *bp)
802{
5c862848
EG
803 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
804 COMMAND_REG_SIMD_MASK);
805 u32 result = REG_RD(bp, hc_addr);
a2fbb9ea 806
5c862848
EG
807 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
808 result, hc_addr);
a2fbb9ea 809
a2fbb9ea
ET
810 return result;
811}
812
813
814/*
815 * fast path service functions
816 */
817
e8b5fc51
VZ
818static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
819{
820 /* Tell compiler that consumer and producer can change */
821 barrier();
822 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
237907c1
EG
823}
824
a2fbb9ea
ET
825/* free skb in the packet ring at pos idx
826 * return idx of last bd freed
827 */
828static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
829 u16 idx)
830{
831 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
ca00392c
EG
832 struct eth_tx_start_bd *tx_start_bd;
833 struct eth_tx_bd *tx_data_bd;
a2fbb9ea 834 struct sk_buff *skb = tx_buf->skb;
34f80b04 835 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
836 int nbd;
837
838 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
839 idx, tx_buf, skb);
840
841 /* unmap first bd */
842 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
ca00392c
EG
843 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
844 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
845 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
a2fbb9ea 846
ca00392c 847 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
a2fbb9ea 848#ifdef BNX2X_STOP_ON_ERROR
ca00392c 849 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
34f80b04 850 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
851 bnx2x_panic();
852 }
853#endif
ca00392c 854 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea 855
ca00392c
EG
856 /* Get the next bd */
857 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
a2fbb9ea 858
ca00392c
EG
859 /* Skip a parse bd... */
860 --nbd;
861 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
862
863 /* ...and the TSO split header bd since they have no mapping */
864 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
865 --nbd;
866 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
a2fbb9ea
ET
867 }
868
869 /* now free frags */
870 while (nbd > 0) {
871
872 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
ca00392c
EG
873 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
874 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
875 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
a2fbb9ea
ET
876 if (--nbd)
877 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
878 }
879
880 /* release skb */
53e5e96e 881 WARN_ON(!skb);
ca00392c 882 dev_kfree_skb_any(skb);
a2fbb9ea
ET
883 tx_buf->first_bd = 0;
884 tx_buf->skb = NULL;
885
34f80b04 886 return new_cons;
a2fbb9ea
ET
887}
888
34f80b04 889static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
a2fbb9ea 890{
34f80b04
EG
891 s16 used;
892 u16 prod;
893 u16 cons;
a2fbb9ea 894
34f80b04 895 barrier(); /* Tell compiler that prod and cons can change */
a2fbb9ea
ET
896 prod = fp->tx_bd_prod;
897 cons = fp->tx_bd_cons;
898
34f80b04
EG
899 /* NUM_TX_RINGS = number of "next-page" entries
900 It will be used as a threshold */
901 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
a2fbb9ea 902
34f80b04 903#ifdef BNX2X_STOP_ON_ERROR
53e5e96e
IJ
904 WARN_ON(used < 0);
905 WARN_ON(used > fp->bp->tx_ring_size);
906 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
34f80b04 907#endif
a2fbb9ea 908
34f80b04 909 return (s16)(fp->bp->tx_ring_size) - used;
a2fbb9ea
ET
910}
911
7961f791 912static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
a2fbb9ea
ET
913{
914 struct bnx2x *bp = fp->bp;
555f6c78 915 struct netdev_queue *txq;
a2fbb9ea
ET
916 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
917 int done = 0;
918
919#ifdef BNX2X_STOP_ON_ERROR
920 if (unlikely(bp->panic))
921 return;
922#endif
923
ca00392c 924 txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
a2fbb9ea
ET
925 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
926 sw_cons = fp->tx_pkt_cons;
927
928 while (sw_cons != hw_cons) {
929 u16 pkt_cons;
930
931 pkt_cons = TX_BD(sw_cons);
932
933 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
934
34f80b04 935 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
a2fbb9ea
ET
936 hw_cons, sw_cons, pkt_cons);
937
34f80b04 938/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
a2fbb9ea
ET
939 rmb();
940 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
941 }
942*/
943 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
944 sw_cons++;
945 done++;
a2fbb9ea
ET
946 }
947
948 fp->tx_pkt_cons = sw_cons;
949 fp->tx_bd_cons = bd_cons;
950
a2fbb9ea 951 /* TBD need a thresh? */
555f6c78 952 if (unlikely(netif_tx_queue_stopped(txq))) {
a2fbb9ea 953
6044735d
EG
954 /* Need to make the tx_bd_cons update visible to start_xmit()
955 * before checking for netif_tx_queue_stopped(). Without the
956 * memory barrier, there is a small possibility that
957 * start_xmit() will miss it and cause the queue to be stopped
958 * forever.
959 */
960 smp_mb();
961
555f6c78 962 if ((netif_tx_queue_stopped(txq)) &&
da5a662a 963 (bp->state == BNX2X_STATE_OPEN) &&
a2fbb9ea 964 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
555f6c78 965 netif_tx_wake_queue(txq);
a2fbb9ea
ET
966 }
967}
968
3196a88a 969
a2fbb9ea
ET
970static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
971 union eth_rx_cqe *rr_cqe)
972{
973 struct bnx2x *bp = fp->bp;
974 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
975 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
976
34f80b04 977 DP(BNX2X_MSG_SP,
a2fbb9ea 978 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
0626b899 979 fp->index, cid, command, bp->state,
34f80b04 980 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
981
982 bp->spq_left++;
983
0626b899 984 if (fp->index) {
a2fbb9ea
ET
985 switch (command | fp->state) {
986 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
987 BNX2X_FP_STATE_OPENING):
988 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
989 cid);
990 fp->state = BNX2X_FP_STATE_OPEN;
991 break;
992
993 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
994 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
995 cid);
996 fp->state = BNX2X_FP_STATE_HALTED;
997 break;
998
999 default:
34f80b04
EG
1000 BNX2X_ERR("unexpected MC reply (%d) "
1001 "fp->state is %x\n", command, fp->state);
1002 break;
a2fbb9ea 1003 }
34f80b04 1004 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
1005 return;
1006 }
c14423fe 1007
a2fbb9ea
ET
1008 switch (command | bp->state) {
1009 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1010 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1011 bp->state = BNX2X_STATE_OPEN;
1012 break;
1013
1014 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1015 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1016 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1017 fp->state = BNX2X_FP_STATE_HALTED;
1018 break;
1019
a2fbb9ea 1020 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 1021 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 1022 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
1023 break;
1024
3196a88a 1025
a2fbb9ea 1026 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 1027 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 1028 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
e665bfda
MC
1029 bp->set_mac_pending--;
1030 smp_wmb();
a2fbb9ea
ET
1031 break;
1032
49d66772 1033 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
ca00392c 1034 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
34f80b04 1035 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
e665bfda
MC
1036 bp->set_mac_pending--;
1037 smp_wmb();
49d66772
ET
1038 break;
1039
a2fbb9ea 1040 default:
34f80b04 1041 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 1042 command, bp->state);
34f80b04 1043 break;
a2fbb9ea 1044 }
34f80b04 1045 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
1046}
1047
7a9b2557
VZ
1048static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1049 struct bnx2x_fastpath *fp, u16 index)
1050{
1051 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1052 struct page *page = sw_buf->page;
1053 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1054
1055 /* Skip "next page" elements */
1056 if (!page)
1057 return;
1058
1059 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
4f40f2cb 1060 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1061 __free_pages(page, PAGES_PER_SGE_SHIFT);
1062
1063 sw_buf->page = NULL;
1064 sge->addr_hi = 0;
1065 sge->addr_lo = 0;
1066}
1067
1068static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1069 struct bnx2x_fastpath *fp, int last)
1070{
1071 int i;
1072
1073 for (i = 0; i < last; i++)
1074 bnx2x_free_rx_sge(bp, fp, i);
1075}
1076
1077static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1078 struct bnx2x_fastpath *fp, u16 index)
1079{
1080 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1081 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1082 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1083 dma_addr_t mapping;
1084
1085 if (unlikely(page == NULL))
1086 return -ENOMEM;
1087
4f40f2cb 1088 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
7a9b2557 1089 PCI_DMA_FROMDEVICE);
8d8bb39b 1090 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
7a9b2557
VZ
1091 __free_pages(page, PAGES_PER_SGE_SHIFT);
1092 return -ENOMEM;
1093 }
1094
1095 sw_buf->page = page;
1096 pci_unmap_addr_set(sw_buf, mapping, mapping);
1097
1098 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1099 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1100
1101 return 0;
1102}
1103
a2fbb9ea
ET
1104static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1105 struct bnx2x_fastpath *fp, u16 index)
1106{
1107 struct sk_buff *skb;
1108 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1109 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1110 dma_addr_t mapping;
1111
1112 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1113 if (unlikely(skb == NULL))
1114 return -ENOMEM;
1115
437cf2f1 1116 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
a2fbb9ea 1117 PCI_DMA_FROMDEVICE);
8d8bb39b 1118 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
a2fbb9ea
ET
1119 dev_kfree_skb(skb);
1120 return -ENOMEM;
1121 }
1122
1123 rx_buf->skb = skb;
1124 pci_unmap_addr_set(rx_buf, mapping, mapping);
1125
1126 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1127 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1128
1129 return 0;
1130}
1131
1132/* note that we are not allocating a new skb,
1133 * we are just moving one from cons to prod
1134 * we are not creating a new mapping,
1135 * so there is no need to check for dma_mapping_error().
1136 */
1137static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1138 struct sk_buff *skb, u16 cons, u16 prod)
1139{
1140 struct bnx2x *bp = fp->bp;
1141 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1142 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1143 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1144 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1145
1146 pci_dma_sync_single_for_device(bp->pdev,
1147 pci_unmap_addr(cons_rx_buf, mapping),
87942b46 1148 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
1149
1150 prod_rx_buf->skb = cons_rx_buf->skb;
1151 pci_unmap_addr_set(prod_rx_buf, mapping,
1152 pci_unmap_addr(cons_rx_buf, mapping));
1153 *prod_bd = *cons_bd;
1154}
1155
7a9b2557
VZ
1156static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1157 u16 idx)
1158{
1159 u16 last_max = fp->last_max_sge;
1160
1161 if (SUB_S16(idx, last_max) > 0)
1162 fp->last_max_sge = idx;
1163}
1164
1165static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1166{
1167 int i, j;
1168
1169 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1170 int idx = RX_SGE_CNT * i - 1;
1171
1172 for (j = 0; j < 2; j++) {
1173 SGE_MASK_CLEAR_BIT(fp, idx);
1174 idx--;
1175 }
1176 }
1177}
1178
1179static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1180 struct eth_fast_path_rx_cqe *fp_cqe)
1181{
1182 struct bnx2x *bp = fp->bp;
4f40f2cb 1183 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
7a9b2557 1184 le16_to_cpu(fp_cqe->len_on_bd)) >>
4f40f2cb 1185 SGE_PAGE_SHIFT;
7a9b2557
VZ
1186 u16 last_max, last_elem, first_elem;
1187 u16 delta = 0;
1188 u16 i;
1189
1190 if (!sge_len)
1191 return;
1192
1193 /* First mark all used pages */
1194 for (i = 0; i < sge_len; i++)
1195 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1196
1197 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1198 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1199
1200 /* Here we assume that the last SGE index is the biggest */
1201 prefetch((void *)(fp->sge_mask));
1202 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1203
1204 last_max = RX_SGE(fp->last_max_sge);
1205 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1206 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1207
1208 /* If ring is not full */
1209 if (last_elem + 1 != first_elem)
1210 last_elem++;
1211
1212 /* Now update the prod */
1213 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1214 if (likely(fp->sge_mask[i]))
1215 break;
1216
1217 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1218 delta += RX_SGE_MASK_ELEM_SZ;
1219 }
1220
1221 if (delta > 0) {
1222 fp->rx_sge_prod += delta;
1223 /* clear page-end entries */
1224 bnx2x_clear_sge_mask_next_elems(fp);
1225 }
1226
1227 DP(NETIF_MSG_RX_STATUS,
1228 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1229 fp->last_max_sge, fp->rx_sge_prod);
1230}
1231
1232static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1233{
1234 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1235 memset(fp->sge_mask, 0xff,
1236 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1237
33471629
EG
1238 /* Clear the two last indices in the page to 1:
1239 these are the indices that correspond to the "next" element,
7a9b2557
VZ
1240 hence will never be indicated and should be removed from
1241 the calculations. */
1242 bnx2x_clear_sge_mask_next_elems(fp);
1243}
1244
1245static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1246 struct sk_buff *skb, u16 cons, u16 prod)
1247{
1248 struct bnx2x *bp = fp->bp;
1249 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1250 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1251 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1252 dma_addr_t mapping;
1253
1254 /* move empty skb from pool to prod and map it */
1255 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1256 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
437cf2f1 1257 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1258 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1259
1260 /* move partial skb from cons to pool (don't unmap yet) */
1261 fp->tpa_pool[queue] = *cons_rx_buf;
1262
1263 /* mark bin state as start - print error if current state != stop */
1264 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1265 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1266
1267 fp->tpa_state[queue] = BNX2X_TPA_START;
1268
1269 /* point prod_bd to new skb */
1270 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1271 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1272
1273#ifdef BNX2X_STOP_ON_ERROR
1274 fp->tpa_queue_used |= (1 << queue);
1275#ifdef __powerpc64__
1276 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1277#else
1278 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1279#endif
1280 fp->tpa_queue_used);
1281#endif
1282}
1283
1284static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1285 struct sk_buff *skb,
1286 struct eth_fast_path_rx_cqe *fp_cqe,
1287 u16 cqe_idx)
1288{
1289 struct sw_rx_page *rx_pg, old_rx_pg;
7a9b2557
VZ
1290 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1291 u32 i, frag_len, frag_size, pages;
1292 int err;
1293 int j;
1294
1295 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
4f40f2cb 1296 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
1297
1298 /* This is needed in order to enable forwarding support */
1299 if (frag_size)
4f40f2cb 1300 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
7a9b2557
VZ
1301 max(frag_size, (u32)len_on_bd));
1302
1303#ifdef BNX2X_STOP_ON_ERROR
4f40f2cb
EG
1304 if (pages >
1305 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
7a9b2557
VZ
1306 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1307 pages, cqe_idx);
1308 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1309 fp_cqe->pkt_len, len_on_bd);
1310 bnx2x_panic();
1311 return -EINVAL;
1312 }
1313#endif
1314
1315 /* Run through the SGL and compose the fragmented skb */
1316 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1317 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1318
1319 /* FW gives the indices of the SGE as if the ring is an array
1320 (meaning that "next" element will consume 2 indices) */
4f40f2cb 1321 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
7a9b2557 1322 rx_pg = &fp->rx_page_ring[sge_idx];
7a9b2557
VZ
1323 old_rx_pg = *rx_pg;
1324
1325 /* If we fail to allocate a substitute page, we simply stop
1326 where we are and drop the whole packet */
1327 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1328 if (unlikely(err)) {
de832a55 1329 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1330 return err;
1331 }
1332
1333 /* Unmap the page as we r going to pass it to the stack */
1334 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
4f40f2cb 1335 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1336
1337 /* Add one frag and update the appropriate fields in the skb */
1338 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1339
1340 skb->data_len += frag_len;
1341 skb->truesize += frag_len;
1342 skb->len += frag_len;
1343
1344 frag_size -= frag_len;
1345 }
1346
1347 return 0;
1348}
1349
1350static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1351 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1352 u16 cqe_idx)
1353{
1354 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1355 struct sk_buff *skb = rx_buf->skb;
1356 /* alloc new skb */
1357 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1358
1359 /* Unmap skb in the pool anyway, as we are going to change
1360 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1361 fails. */
1362 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
437cf2f1 1363 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557 1364
7a9b2557 1365 if (likely(new_skb)) {
66e855f3
YG
1366 /* fix ip xsum and give it to the stack */
1367 /* (no need to map the new skb) */
0c6671b0
EG
1368#ifdef BCM_VLAN
1369 int is_vlan_cqe =
1370 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1371 PARSING_FLAGS_VLAN);
1372 int is_not_hwaccel_vlan_cqe =
1373 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1374#endif
7a9b2557
VZ
1375
1376 prefetch(skb);
1377 prefetch(((char *)(skb)) + 128);
1378
7a9b2557
VZ
1379#ifdef BNX2X_STOP_ON_ERROR
1380 if (pad + len > bp->rx_buf_size) {
1381 BNX2X_ERR("skb_put is about to fail... "
1382 "pad %d len %d rx_buf_size %d\n",
1383 pad, len, bp->rx_buf_size);
1384 bnx2x_panic();
1385 return;
1386 }
1387#endif
1388
1389 skb_reserve(skb, pad);
1390 skb_put(skb, len);
1391
1392 skb->protocol = eth_type_trans(skb, bp->dev);
1393 skb->ip_summed = CHECKSUM_UNNECESSARY;
1394
1395 {
1396 struct iphdr *iph;
1397
1398 iph = (struct iphdr *)skb->data;
0c6671b0
EG
1399#ifdef BCM_VLAN
1400 /* If there is no Rx VLAN offloading -
1401 take VLAN tag into an account */
1402 if (unlikely(is_not_hwaccel_vlan_cqe))
1403 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1404#endif
7a9b2557
VZ
1405 iph->check = 0;
1406 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1407 }
1408
1409 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1410 &cqe->fast_path_cqe, cqe_idx)) {
1411#ifdef BCM_VLAN
0c6671b0
EG
1412 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1413 (!is_not_hwaccel_vlan_cqe))
7a9b2557
VZ
1414 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1415 le16_to_cpu(cqe->fast_path_cqe.
1416 vlan_tag));
1417 else
1418#endif
1419 netif_receive_skb(skb);
1420 } else {
1421 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1422 " - dropping packet!\n");
1423 dev_kfree_skb(skb);
1424 }
1425
7a9b2557
VZ
1426
1427 /* put new skb in bin */
1428 fp->tpa_pool[queue].skb = new_skb;
1429
1430 } else {
66e855f3 1431 /* else drop the packet and keep the buffer in the bin */
7a9b2557
VZ
1432 DP(NETIF_MSG_RX_STATUS,
1433 "Failed to allocate new skb - dropping packet!\n");
de832a55 1434 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1435 }
1436
1437 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1438}
1439
1440static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1441 struct bnx2x_fastpath *fp,
1442 u16 bd_prod, u16 rx_comp_prod,
1443 u16 rx_sge_prod)
1444{
8d9c5f34 1445 struct ustorm_eth_rx_producers rx_prods = {0};
7a9b2557
VZ
1446 int i;
1447
1448 /* Update producers */
1449 rx_prods.bd_prod = bd_prod;
1450 rx_prods.cqe_prod = rx_comp_prod;
1451 rx_prods.sge_prod = rx_sge_prod;
1452
58f4c4cf
EG
1453 /*
1454 * Make sure that the BD and SGE data is updated before updating the
1455 * producers since FW might read the BD/SGE right after the producer
1456 * is updated.
1457 * This is only applicable for weak-ordered memory model archs such
1458 * as IA-64. The following barrier is also mandatory since FW will
1459 * assumes BDs must have buffers.
1460 */
1461 wmb();
1462
8d9c5f34
EG
1463 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1464 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 1465 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
7a9b2557
VZ
1466 ((u32 *)&rx_prods)[i]);
1467
58f4c4cf
EG
1468 mmiowb(); /* keep prod updates ordered */
1469
7a9b2557 1470 DP(NETIF_MSG_RX_STATUS,
555f6c78
EG
1471 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1472 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
7a9b2557
VZ
1473}
1474
a2fbb9ea
ET
1475static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1476{
1477 struct bnx2x *bp = fp->bp;
34f80b04 1478 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1479 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1480 int rx_pkt = 0;
1481
1482#ifdef BNX2X_STOP_ON_ERROR
1483 if (unlikely(bp->panic))
1484 return 0;
1485#endif
1486
34f80b04
EG
1487 /* CQ "next element" is of the size of the regular element,
1488 that's why it's ok here */
a2fbb9ea
ET
1489 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1490 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1491 hw_comp_cons++;
1492
1493 bd_cons = fp->rx_bd_cons;
1494 bd_prod = fp->rx_bd_prod;
34f80b04 1495 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1496 sw_comp_cons = fp->rx_comp_cons;
1497 sw_comp_prod = fp->rx_comp_prod;
1498
1499 /* Memory barrier necessary as speculative reads of the rx
1500 * buffer can be ahead of the index in the status block
1501 */
1502 rmb();
1503
1504 DP(NETIF_MSG_RX_STATUS,
1505 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
0626b899 1506 fp->index, hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1507
1508 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1509 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1510 struct sk_buff *skb;
1511 union eth_rx_cqe *cqe;
34f80b04
EG
1512 u8 cqe_fp_flags;
1513 u16 len, pad;
a2fbb9ea
ET
1514
1515 comp_ring_cons = RCQ_BD(sw_comp_cons);
1516 bd_prod = RX_BD(bd_prod);
1517 bd_cons = RX_BD(bd_cons);
1518
619e7a66
EG
1519 /* Prefetch the page containing the BD descriptor
1520 at producer's index. It will be needed when new skb is
1521 allocated */
1522 prefetch((void *)(PAGE_ALIGN((unsigned long)
1523 (&fp->rx_desc_ring[bd_prod])) -
1524 PAGE_SIZE + 1));
1525
a2fbb9ea 1526 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1527 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1528
a2fbb9ea 1529 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1530 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1531 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1532 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1533 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1534 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1535
1536 /* is this a slowpath msg? */
34f80b04 1537 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1538 bnx2x_sp_event(fp, cqe);
1539 goto next_cqe;
1540
1541 /* this is an rx packet */
1542 } else {
1543 rx_buf = &fp->rx_buf_ring[bd_cons];
1544 skb = rx_buf->skb;
a2fbb9ea
ET
1545 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1546 pad = cqe->fast_path_cqe.placement_offset;
1547
7a9b2557
VZ
1548 /* If CQE is marked both TPA_START and TPA_END
1549 it is a non-TPA CQE */
1550 if ((!fp->disable_tpa) &&
1551 (TPA_TYPE(cqe_fp_flags) !=
1552 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1553 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1554
1555 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1556 DP(NETIF_MSG_RX_STATUS,
1557 "calling tpa_start on queue %d\n",
1558 queue);
1559
1560 bnx2x_tpa_start(fp, queue, skb,
1561 bd_cons, bd_prod);
1562 goto next_rx;
1563 }
1564
1565 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1566 DP(NETIF_MSG_RX_STATUS,
1567 "calling tpa_stop on queue %d\n",
1568 queue);
1569
1570 if (!BNX2X_RX_SUM_FIX(cqe))
1571 BNX2X_ERR("STOP on none TCP "
1572 "data\n");
1573
1574 /* This is a size of the linear data
1575 on this skb */
1576 len = le16_to_cpu(cqe->fast_path_cqe.
1577 len_on_bd);
1578 bnx2x_tpa_stop(bp, fp, queue, pad,
1579 len, cqe, comp_ring_cons);
1580#ifdef BNX2X_STOP_ON_ERROR
1581 if (bp->panic)
17cb4006 1582 return 0;
7a9b2557
VZ
1583#endif
1584
1585 bnx2x_update_sge_prod(fp,
1586 &cqe->fast_path_cqe);
1587 goto next_cqe;
1588 }
1589 }
1590
a2fbb9ea
ET
1591 pci_dma_sync_single_for_device(bp->pdev,
1592 pci_unmap_addr(rx_buf, mapping),
1593 pad + RX_COPY_THRESH,
1594 PCI_DMA_FROMDEVICE);
1595 prefetch(skb);
1596 prefetch(((char *)(skb)) + 128);
1597
1598 /* is this an error packet? */
34f80b04 1599 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1600 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1601 "ERROR flags %x rx packet %u\n",
1602 cqe_fp_flags, sw_comp_cons);
de832a55 1603 fp->eth_q_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1604 goto reuse_rx;
1605 }
1606
1607 /* Since we don't have a jumbo ring
1608 * copy small packets if mtu > 1500
1609 */
1610 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1611 (len <= RX_COPY_THRESH)) {
1612 struct sk_buff *new_skb;
1613
1614 new_skb = netdev_alloc_skb(bp->dev,
1615 len + pad);
1616 if (new_skb == NULL) {
1617 DP(NETIF_MSG_RX_ERR,
34f80b04 1618 "ERROR packet dropped "
a2fbb9ea 1619 "because of alloc failure\n");
de832a55 1620 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1621 goto reuse_rx;
1622 }
1623
1624 /* aligned copy */
1625 skb_copy_from_linear_data_offset(skb, pad,
1626 new_skb->data + pad, len);
1627 skb_reserve(new_skb, pad);
1628 skb_put(new_skb, len);
1629
1630 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1631
1632 skb = new_skb;
1633
a119a069
EG
1634 } else
1635 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
a2fbb9ea
ET
1636 pci_unmap_single(bp->pdev,
1637 pci_unmap_addr(rx_buf, mapping),
437cf2f1 1638 bp->rx_buf_size,
a2fbb9ea
ET
1639 PCI_DMA_FROMDEVICE);
1640 skb_reserve(skb, pad);
1641 skb_put(skb, len);
1642
1643 } else {
1644 DP(NETIF_MSG_RX_ERR,
34f80b04 1645 "ERROR packet dropped because "
a2fbb9ea 1646 "of alloc failure\n");
de832a55 1647 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1648reuse_rx:
1649 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1650 goto next_rx;
1651 }
1652
1653 skb->protocol = eth_type_trans(skb, bp->dev);
1654
1655 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1656 if (bp->rx_csum) {
1adcd8be
EG
1657 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1658 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1659 else
de832a55 1660 fp->eth_q_stats.hw_csum_err++;
66e855f3 1661 }
a2fbb9ea
ET
1662 }
1663
748e5439 1664 skb_record_rx_queue(skb, fp->index);
ab6ad5a4 1665
a2fbb9ea 1666#ifdef BCM_VLAN
0c6671b0 1667 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1668 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1669 PARSING_FLAGS_VLAN))
a2fbb9ea
ET
1670 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1671 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1672 else
1673#endif
34f80b04 1674 netif_receive_skb(skb);
a2fbb9ea 1675
a2fbb9ea
ET
1676
1677next_rx:
1678 rx_buf->skb = NULL;
1679
1680 bd_cons = NEXT_RX_IDX(bd_cons);
1681 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1682 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1683 rx_pkt++;
a2fbb9ea
ET
1684next_cqe:
1685 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1686 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1687
34f80b04 1688 if (rx_pkt == budget)
a2fbb9ea
ET
1689 break;
1690 } /* while */
1691
1692 fp->rx_bd_cons = bd_cons;
34f80b04 1693 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1694 fp->rx_comp_cons = sw_comp_cons;
1695 fp->rx_comp_prod = sw_comp_prod;
1696
7a9b2557
VZ
1697 /* Update producers */
1698 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1699 fp->rx_sge_prod);
a2fbb9ea
ET
1700
1701 fp->rx_pkt += rx_pkt;
1702 fp->rx_calls++;
1703
1704 return rx_pkt;
1705}
1706
1707static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1708{
1709 struct bnx2x_fastpath *fp = fp_cookie;
1710 struct bnx2x *bp = fp->bp;
a2fbb9ea 1711
da5a662a
VZ
1712 /* Return here if interrupt is disabled */
1713 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1714 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1715 return IRQ_HANDLED;
1716 }
1717
34f80b04 1718 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1719 fp->index, fp->sb_id);
0626b899 1720 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1721
1722#ifdef BNX2X_STOP_ON_ERROR
1723 if (unlikely(bp->panic))
1724 return IRQ_HANDLED;
1725#endif
ca00392c
EG
1726 /* Handle Rx or Tx according to MSI-X vector */
1727 if (fp->is_rx_queue) {
1728 prefetch(fp->rx_cons_sb);
1729 prefetch(&fp->status_blk->u_status_block.status_block_index);
a2fbb9ea 1730
ca00392c 1731 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1732
ca00392c
EG
1733 } else {
1734 prefetch(fp->tx_cons_sb);
1735 prefetch(&fp->status_blk->c_status_block.status_block_index);
1736
1737 bnx2x_update_fpsb_idx(fp);
1738 rmb();
1739 bnx2x_tx_int(fp);
1740
1741 /* Re-enable interrupts */
1742 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1743 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1744 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1745 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1746 }
34f80b04 1747
a2fbb9ea
ET
1748 return IRQ_HANDLED;
1749}
1750
1751static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1752{
555f6c78 1753 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1754 u16 status = bnx2x_ack_int(bp);
34f80b04 1755 u16 mask;
ca00392c 1756 int i;
a2fbb9ea 1757
34f80b04 1758 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1759 if (unlikely(status == 0)) {
1760 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1761 return IRQ_NONE;
1762 }
f5372251 1763 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1764
34f80b04 1765 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1766 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1767 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1768 return IRQ_HANDLED;
1769 }
1770
3196a88a
EG
1771#ifdef BNX2X_STOP_ON_ERROR
1772 if (unlikely(bp->panic))
1773 return IRQ_HANDLED;
1774#endif
1775
ca00392c
EG
1776 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1777 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1778
ca00392c
EG
1779 mask = 0x2 << fp->sb_id;
1780 if (status & mask) {
1781 /* Handle Rx or Tx according to SB id */
1782 if (fp->is_rx_queue) {
1783 prefetch(fp->rx_cons_sb);
1784 prefetch(&fp->status_blk->u_status_block.
1785 status_block_index);
a2fbb9ea 1786
ca00392c 1787 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1788
ca00392c
EG
1789 } else {
1790 prefetch(fp->tx_cons_sb);
1791 prefetch(&fp->status_blk->c_status_block.
1792 status_block_index);
1793
1794 bnx2x_update_fpsb_idx(fp);
1795 rmb();
1796 bnx2x_tx_int(fp);
1797
1798 /* Re-enable interrupts */
1799 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1800 le16_to_cpu(fp->fp_u_idx),
1801 IGU_INT_NOP, 1);
1802 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1803 le16_to_cpu(fp->fp_c_idx),
1804 IGU_INT_ENABLE, 1);
1805 }
1806 status &= ~mask;
1807 }
a2fbb9ea
ET
1808 }
1809
a2fbb9ea 1810
34f80b04 1811 if (unlikely(status & 0x1)) {
1cf167f2 1812 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1813
1814 status &= ~0x1;
1815 if (!status)
1816 return IRQ_HANDLED;
1817 }
1818
34f80b04
EG
1819 if (status)
1820 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1821 status);
a2fbb9ea 1822
c18487ee 1823 return IRQ_HANDLED;
a2fbb9ea
ET
1824}
1825
c18487ee 1826/* end of fast path */
a2fbb9ea 1827
bb2a0f7a 1828static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1829
c18487ee
YR
1830/* Link */
1831
1832/*
1833 * General service functions
1834 */
a2fbb9ea 1835
4a37fb66 1836static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1837{
1838 u32 lock_status;
1839 u32 resource_bit = (1 << resource);
4a37fb66
YG
1840 int func = BP_FUNC(bp);
1841 u32 hw_lock_control_reg;
c18487ee 1842 int cnt;
a2fbb9ea 1843
c18487ee
YR
1844 /* Validating that the resource is within range */
1845 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1846 DP(NETIF_MSG_HW,
1847 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1848 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1849 return -EINVAL;
1850 }
a2fbb9ea 1851
4a37fb66
YG
1852 if (func <= 5) {
1853 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1854 } else {
1855 hw_lock_control_reg =
1856 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1857 }
1858
c18487ee 1859 /* Validating that the resource is not already taken */
4a37fb66 1860 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1861 if (lock_status & resource_bit) {
1862 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1863 lock_status, resource_bit);
1864 return -EEXIST;
1865 }
a2fbb9ea 1866
46230476
EG
1867 /* Try for 5 second every 5ms */
1868 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1869 /* Try to acquire the lock */
4a37fb66
YG
1870 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1871 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1872 if (lock_status & resource_bit)
1873 return 0;
a2fbb9ea 1874
c18487ee 1875 msleep(5);
a2fbb9ea 1876 }
c18487ee
YR
1877 DP(NETIF_MSG_HW, "Timeout\n");
1878 return -EAGAIN;
1879}
a2fbb9ea 1880
4a37fb66 1881static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1882{
1883 u32 lock_status;
1884 u32 resource_bit = (1 << resource);
4a37fb66
YG
1885 int func = BP_FUNC(bp);
1886 u32 hw_lock_control_reg;
a2fbb9ea 1887
c18487ee
YR
1888 /* Validating that the resource is within range */
1889 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1890 DP(NETIF_MSG_HW,
1891 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1892 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1893 return -EINVAL;
1894 }
1895
4a37fb66
YG
1896 if (func <= 5) {
1897 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1898 } else {
1899 hw_lock_control_reg =
1900 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1901 }
1902
c18487ee 1903 /* Validating that the resource is currently taken */
4a37fb66 1904 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1905 if (!(lock_status & resource_bit)) {
1906 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1907 lock_status, resource_bit);
1908 return -EFAULT;
a2fbb9ea
ET
1909 }
1910
4a37fb66 1911 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1912 return 0;
1913}
1914
1915/* HW Lock for shared dual port PHYs */
4a37fb66 1916static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1917{
34f80b04 1918 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1919
46c6a674
EG
1920 if (bp->port.need_hw_lock)
1921 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1922}
a2fbb9ea 1923
4a37fb66 1924static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1925{
46c6a674
EG
1926 if (bp->port.need_hw_lock)
1927 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1928
34f80b04 1929 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1930}
a2fbb9ea 1931
4acac6a5
EG
1932int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1933{
1934 /* The GPIO should be swapped if swap register is set and active */
1935 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1936 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1937 int gpio_shift = gpio_num +
1938 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1939 u32 gpio_mask = (1 << gpio_shift);
1940 u32 gpio_reg;
1941 int value;
1942
1943 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1944 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1945 return -EINVAL;
1946 }
1947
1948 /* read GPIO value */
1949 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1950
1951 /* get the requested pin value */
1952 if ((gpio_reg & gpio_mask) == gpio_mask)
1953 value = 1;
1954 else
1955 value = 0;
1956
1957 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1958
1959 return value;
1960}
1961
17de50b7 1962int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1963{
1964 /* The GPIO should be swapped if swap register is set and active */
1965 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1966 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1967 int gpio_shift = gpio_num +
1968 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1969 u32 gpio_mask = (1 << gpio_shift);
1970 u32 gpio_reg;
a2fbb9ea 1971
c18487ee
YR
1972 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1973 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1974 return -EINVAL;
1975 }
a2fbb9ea 1976
4a37fb66 1977 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1978 /* read GPIO and mask except the float bits */
1979 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1980
c18487ee
YR
1981 switch (mode) {
1982 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1983 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1984 gpio_num, gpio_shift);
1985 /* clear FLOAT and set CLR */
1986 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1987 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1988 break;
a2fbb9ea 1989
c18487ee
YR
1990 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1991 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1992 gpio_num, gpio_shift);
1993 /* clear FLOAT and set SET */
1994 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1995 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1996 break;
a2fbb9ea 1997
17de50b7 1998 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1999 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2000 gpio_num, gpio_shift);
2001 /* set FLOAT */
2002 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2003 break;
a2fbb9ea 2004
c18487ee
YR
2005 default:
2006 break;
a2fbb9ea
ET
2007 }
2008
c18487ee 2009 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2010 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2011
c18487ee 2012 return 0;
a2fbb9ea
ET
2013}
2014
4acac6a5
EG
2015int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2016{
2017 /* The GPIO should be swapped if swap register is set and active */
2018 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2019 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2020 int gpio_shift = gpio_num +
2021 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2022 u32 gpio_mask = (1 << gpio_shift);
2023 u32 gpio_reg;
2024
2025 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2026 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2027 return -EINVAL;
2028 }
2029
2030 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2031 /* read GPIO int */
2032 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2033
2034 switch (mode) {
2035 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2036 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2037 "output low\n", gpio_num, gpio_shift);
2038 /* clear SET and set CLR */
2039 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2040 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2041 break;
2042
2043 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2044 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2045 "output high\n", gpio_num, gpio_shift);
2046 /* clear CLR and set SET */
2047 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2048 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2049 break;
2050
2051 default:
2052 break;
2053 }
2054
2055 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2056 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2057
2058 return 0;
2059}
2060
c18487ee 2061static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2062{
c18487ee
YR
2063 u32 spio_mask = (1 << spio_num);
2064 u32 spio_reg;
a2fbb9ea 2065
c18487ee
YR
2066 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2067 (spio_num > MISC_REGISTERS_SPIO_7)) {
2068 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2069 return -EINVAL;
a2fbb9ea
ET
2070 }
2071
4a37fb66 2072 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2073 /* read SPIO and mask except the float bits */
2074 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2075
c18487ee 2076 switch (mode) {
6378c025 2077 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2078 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2079 /* clear FLOAT and set CLR */
2080 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2081 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2082 break;
a2fbb9ea 2083
6378c025 2084 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2085 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2086 /* clear FLOAT and set SET */
2087 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2088 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2089 break;
a2fbb9ea 2090
c18487ee
YR
2091 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2092 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2093 /* set FLOAT */
2094 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2095 break;
a2fbb9ea 2096
c18487ee
YR
2097 default:
2098 break;
a2fbb9ea
ET
2099 }
2100
c18487ee 2101 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2102 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2103
a2fbb9ea
ET
2104 return 0;
2105}
2106
c18487ee 2107static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2108{
ad33ea3a
EG
2109 switch (bp->link_vars.ieee_fc &
2110 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2111 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2112 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2113 ADVERTISED_Pause);
2114 break;
356e2385 2115
c18487ee 2116 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2117 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2118 ADVERTISED_Pause);
2119 break;
356e2385 2120
c18487ee 2121 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2122 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2123 break;
356e2385 2124
c18487ee 2125 default:
34f80b04 2126 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2127 ADVERTISED_Pause);
2128 break;
2129 }
2130}
f1410647 2131
c18487ee
YR
2132static void bnx2x_link_report(struct bnx2x *bp)
2133{
2691d51d
EG
2134 if (bp->state == BNX2X_STATE_DISABLED) {
2135 netif_carrier_off(bp->dev);
2136 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2137 return;
2138 }
2139
c18487ee
YR
2140 if (bp->link_vars.link_up) {
2141 if (bp->state == BNX2X_STATE_OPEN)
2142 netif_carrier_on(bp->dev);
2143 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2144
c18487ee 2145 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2146
c18487ee
YR
2147 if (bp->link_vars.duplex == DUPLEX_FULL)
2148 printk("full duplex");
2149 else
2150 printk("half duplex");
f1410647 2151
c0700f90
DM
2152 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2153 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2154 printk(", receive ");
356e2385
EG
2155 if (bp->link_vars.flow_ctrl &
2156 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2157 printk("& transmit ");
2158 } else {
2159 printk(", transmit ");
2160 }
2161 printk("flow control ON");
2162 }
2163 printk("\n");
f1410647 2164
c18487ee
YR
2165 } else { /* link_down */
2166 netif_carrier_off(bp->dev);
2167 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2168 }
c18487ee
YR
2169}
2170
b5bf9068 2171static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2172{
19680c48
EG
2173 if (!BP_NOMCP(bp)) {
2174 u8 rc;
a2fbb9ea 2175
19680c48 2176 /* Initialize link parameters structure variables */
8c99e7b0
YR
2177 /* It is recommended to turn off RX FC for jumbo frames
2178 for better performance */
0c593270 2179 if (bp->dev->mtu > 5000)
c0700f90 2180 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2181 else
c0700f90 2182 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2183
4a37fb66 2184 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2185
2186 if (load_mode == LOAD_DIAG)
2187 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2188
19680c48 2189 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2190
4a37fb66 2191 bnx2x_release_phy_lock(bp);
a2fbb9ea 2192
3c96c68b
EG
2193 bnx2x_calc_fc_adv(bp);
2194
b5bf9068
EG
2195 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2196 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2197 bnx2x_link_report(bp);
b5bf9068 2198 }
34f80b04 2199
19680c48
EG
2200 return rc;
2201 }
f5372251 2202 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2203 return -EINVAL;
a2fbb9ea
ET
2204}
2205
c18487ee 2206static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2207{
19680c48 2208 if (!BP_NOMCP(bp)) {
4a37fb66 2209 bnx2x_acquire_phy_lock(bp);
19680c48 2210 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2211 bnx2x_release_phy_lock(bp);
a2fbb9ea 2212
19680c48
EG
2213 bnx2x_calc_fc_adv(bp);
2214 } else
f5372251 2215 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2216}
a2fbb9ea 2217
c18487ee
YR
2218static void bnx2x__link_reset(struct bnx2x *bp)
2219{
19680c48 2220 if (!BP_NOMCP(bp)) {
4a37fb66 2221 bnx2x_acquire_phy_lock(bp);
589abe3a 2222 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2223 bnx2x_release_phy_lock(bp);
19680c48 2224 } else
f5372251 2225 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2226}
a2fbb9ea 2227
c18487ee
YR
2228static u8 bnx2x_link_test(struct bnx2x *bp)
2229{
2230 u8 rc;
a2fbb9ea 2231
4a37fb66 2232 bnx2x_acquire_phy_lock(bp);
c18487ee 2233 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2234 bnx2x_release_phy_lock(bp);
a2fbb9ea 2235
c18487ee
YR
2236 return rc;
2237}
a2fbb9ea 2238
8a1c38d1 2239static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2240{
8a1c38d1
EG
2241 u32 r_param = bp->link_vars.line_speed / 8;
2242 u32 fair_periodic_timeout_usec;
2243 u32 t_fair;
34f80b04 2244
8a1c38d1
EG
2245 memset(&(bp->cmng.rs_vars), 0,
2246 sizeof(struct rate_shaping_vars_per_port));
2247 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2248
8a1c38d1
EG
2249 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2250 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2251
8a1c38d1
EG
2252 /* this is the threshold below which no timer arming will occur
2253 1.25 coefficient is for the threshold to be a little bigger
2254 than the real time, to compensate for timer in-accuracy */
2255 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2256 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2257
8a1c38d1
EG
2258 /* resolution of fairness timer */
2259 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2260 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2261 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2262
8a1c38d1
EG
2263 /* this is the threshold below which we won't arm the timer anymore */
2264 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2265
8a1c38d1
EG
2266 /* we multiply by 1e3/8 to get bytes/msec.
2267 We don't want the credits to pass a credit
2268 of the t_fair*FAIR_MEM (algorithm resolution) */
2269 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2270 /* since each tick is 4 usec */
2271 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2272}
2273
2691d51d
EG
2274/* Calculates the sum of vn_min_rates.
2275 It's needed for further normalizing of the min_rates.
2276 Returns:
2277 sum of vn_min_rates.
2278 or
2279 0 - if all the min_rates are 0.
2280 In the later case fainess algorithm should be deactivated.
2281 If not all min_rates are zero then those that are zeroes will be set to 1.
2282 */
2283static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2284{
2285 int all_zero = 1;
2286 int port = BP_PORT(bp);
2287 int vn;
2288
2289 bp->vn_weight_sum = 0;
2290 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2291 int func = 2*vn + port;
2292 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2293 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2294 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2295
2296 /* Skip hidden vns */
2297 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2298 continue;
2299
2300 /* If min rate is zero - set it to 1 */
2301 if (!vn_min_rate)
2302 vn_min_rate = DEF_MIN_RATE;
2303 else
2304 all_zero = 0;
2305
2306 bp->vn_weight_sum += vn_min_rate;
2307 }
2308
2309 /* ... only if all min rates are zeros - disable fairness */
2310 if (all_zero)
2311 bp->vn_weight_sum = 0;
2312}
2313
8a1c38d1 2314static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2315{
2316 struct rate_shaping_vars_per_vn m_rs_vn;
2317 struct fairness_vars_per_vn m_fair_vn;
2318 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2319 u16 vn_min_rate, vn_max_rate;
2320 int i;
2321
2322 /* If function is hidden - set min and max to zeroes */
2323 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2324 vn_min_rate = 0;
2325 vn_max_rate = 0;
2326
2327 } else {
2328 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2329 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2330 /* If fairness is enabled (not all min rates are zeroes) and
34f80b04 2331 if current min rate is zero - set it to 1.
33471629 2332 This is a requirement of the algorithm. */
8a1c38d1 2333 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2334 vn_min_rate = DEF_MIN_RATE;
2335 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2336 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2337 }
2338
8a1c38d1
EG
2339 DP(NETIF_MSG_IFUP,
2340 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2341 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2342
2343 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2344 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2345
2346 /* global vn counter - maximal Mbps for this vn */
2347 m_rs_vn.vn_counter.rate = vn_max_rate;
2348
2349 /* quota - number of bytes transmitted in this period */
2350 m_rs_vn.vn_counter.quota =
2351 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2352
8a1c38d1 2353 if (bp->vn_weight_sum) {
34f80b04
EG
2354 /* credit for each period of the fairness algorithm:
2355 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
2356 vn_weight_sum should not be larger than 10000, thus
2357 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2358 than zero */
34f80b04 2359 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2360 max((u32)(vn_min_rate * (T_FAIR_COEF /
2361 (8 * bp->vn_weight_sum))),
2362 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2363 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2364 m_fair_vn.vn_credit_delta);
2365 }
2366
34f80b04
EG
2367 /* Store it to internal memory */
2368 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2369 REG_WR(bp, BAR_XSTRORM_INTMEM +
2370 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2371 ((u32 *)(&m_rs_vn))[i]);
2372
2373 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2374 REG_WR(bp, BAR_XSTRORM_INTMEM +
2375 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2376 ((u32 *)(&m_fair_vn))[i]);
2377}
2378
8a1c38d1 2379
c18487ee
YR
2380/* This function is called upon link interrupt */
2381static void bnx2x_link_attn(struct bnx2x *bp)
2382{
bb2a0f7a
YG
2383 /* Make sure that we are synced with the current statistics */
2384 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2385
c18487ee 2386 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2387
bb2a0f7a
YG
2388 if (bp->link_vars.link_up) {
2389
1c06328c 2390 /* dropless flow control */
a18f5128 2391 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2392 int port = BP_PORT(bp);
2393 u32 pause_enabled = 0;
2394
2395 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2396 pause_enabled = 1;
2397
2398 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2399 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2400 pause_enabled);
2401 }
2402
bb2a0f7a
YG
2403 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2404 struct host_port_stats *pstats;
2405
2406 pstats = bnx2x_sp(bp, port_stats);
2407 /* reset old bmac stats */
2408 memset(&(pstats->mac_stx[0]), 0,
2409 sizeof(struct mac_stx));
2410 }
2411 if ((bp->state == BNX2X_STATE_OPEN) ||
2412 (bp->state == BNX2X_STATE_DISABLED))
2413 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2414 }
2415
c18487ee
YR
2416 /* indicate link status */
2417 bnx2x_link_report(bp);
34f80b04
EG
2418
2419 if (IS_E1HMF(bp)) {
8a1c38d1 2420 int port = BP_PORT(bp);
34f80b04 2421 int func;
8a1c38d1 2422 int vn;
34f80b04 2423
ab6ad5a4 2424 /* Set the attention towards other drivers on the same port */
34f80b04
EG
2425 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2426 if (vn == BP_E1HVN(bp))
2427 continue;
2428
8a1c38d1 2429 func = ((vn << 1) | port);
34f80b04
EG
2430 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2431 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2432 }
34f80b04 2433
8a1c38d1
EG
2434 if (bp->link_vars.link_up) {
2435 int i;
2436
2437 /* Init rate shaping and fairness contexts */
2438 bnx2x_init_port_minmax(bp);
34f80b04 2439
34f80b04 2440 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2441 bnx2x_init_vn_minmax(bp, 2*vn + port);
2442
2443 /* Store it to internal memory */
2444 for (i = 0;
2445 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2446 REG_WR(bp, BAR_XSTRORM_INTMEM +
2447 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2448 ((u32 *)(&bp->cmng))[i]);
2449 }
34f80b04 2450 }
c18487ee 2451}
a2fbb9ea 2452
c18487ee
YR
2453static void bnx2x__link_status_update(struct bnx2x *bp)
2454{
2691d51d
EG
2455 int func = BP_FUNC(bp);
2456
c18487ee
YR
2457 if (bp->state != BNX2X_STATE_OPEN)
2458 return;
a2fbb9ea 2459
c18487ee 2460 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2461
bb2a0f7a
YG
2462 if (bp->link_vars.link_up)
2463 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2464 else
2465 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2466
2691d51d
EG
2467 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2468 bnx2x_calc_vn_weight_sum(bp);
2469
c18487ee
YR
2470 /* indicate link status */
2471 bnx2x_link_report(bp);
a2fbb9ea 2472}
a2fbb9ea 2473
34f80b04
EG
2474static void bnx2x_pmf_update(struct bnx2x *bp)
2475{
2476 int port = BP_PORT(bp);
2477 u32 val;
2478
2479 bp->port.pmf = 1;
2480 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2481
2482 /* enable nig attention */
2483 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2484 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2485 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2486
2487 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2488}
2489
c18487ee 2490/* end of Link */
a2fbb9ea
ET
2491
2492/* slow path */
2493
2494/*
2495 * General service functions
2496 */
2497
2691d51d
EG
2498/* send the MCP a request, block until there is a reply */
2499u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2500{
2501 int func = BP_FUNC(bp);
2502 u32 seq = ++bp->fw_seq;
2503 u32 rc = 0;
2504 u32 cnt = 1;
2505 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2506
2507 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2508 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2509
2510 do {
2511 /* let the FW do it's magic ... */
2512 msleep(delay);
2513
2514 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2515
2516 /* Give the FW up to 2 second (200*10ms) */
2517 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2518
2519 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2520 cnt*delay, rc, seq);
2521
2522 /* is this a reply to our command? */
2523 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2524 rc &= FW_MSG_CODE_MASK;
2525 else {
2526 /* FW BUG! */
2527 BNX2X_ERR("FW failed to respond!\n");
2528 bnx2x_fw_dump(bp);
2529 rc = 0;
2530 }
2531
2532 return rc;
2533}
2534
2535static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
e665bfda 2536static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2691d51d
EG
2537static void bnx2x_set_rx_mode(struct net_device *dev);
2538
2539static void bnx2x_e1h_disable(struct bnx2x *bp)
2540{
2541 int port = BP_PORT(bp);
2542 int i;
2543
2544 bp->rx_mode = BNX2X_RX_MODE_NONE;
2545 bnx2x_set_storm_rx_mode(bp);
2546
2547 netif_tx_disable(bp->dev);
2548 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2549
2550 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2551
e665bfda 2552 bnx2x_set_eth_mac_addr_e1h(bp, 0);
2691d51d
EG
2553
2554 for (i = 0; i < MC_HASH_SIZE; i++)
2555 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2556
2557 netif_carrier_off(bp->dev);
2558}
2559
2560static void bnx2x_e1h_enable(struct bnx2x *bp)
2561{
2562 int port = BP_PORT(bp);
2563
2564 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2565
e665bfda 2566 bnx2x_set_eth_mac_addr_e1h(bp, 1);
2691d51d
EG
2567
2568 /* Tx queue should be only reenabled */
2569 netif_tx_wake_all_queues(bp->dev);
2570
2571 /* Initialize the receive filter. */
2572 bnx2x_set_rx_mode(bp->dev);
2573}
2574
2575static void bnx2x_update_min_max(struct bnx2x *bp)
2576{
2577 int port = BP_PORT(bp);
2578 int vn, i;
2579
2580 /* Init rate shaping and fairness contexts */
2581 bnx2x_init_port_minmax(bp);
2582
2583 bnx2x_calc_vn_weight_sum(bp);
2584
2585 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2586 bnx2x_init_vn_minmax(bp, 2*vn + port);
2587
2588 if (bp->port.pmf) {
2589 int func;
2590
2591 /* Set the attention towards other drivers on the same port */
2592 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2593 if (vn == BP_E1HVN(bp))
2594 continue;
2595
2596 func = ((vn << 1) | port);
2597 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2598 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2599 }
2600
2601 /* Store it to internal memory */
2602 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2603 REG_WR(bp, BAR_XSTRORM_INTMEM +
2604 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2605 ((u32 *)(&bp->cmng))[i]);
2606 }
2607}
2608
2609static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2610{
2611 int func = BP_FUNC(bp);
2612
2613 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2614 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2615
2616 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2617
2618 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2619 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2620 bp->state = BNX2X_STATE_DISABLED;
2621
2622 bnx2x_e1h_disable(bp);
2623 } else {
2624 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2625 bp->state = BNX2X_STATE_OPEN;
2626
2627 bnx2x_e1h_enable(bp);
2628 }
2629 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2630 }
2631 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2632
2633 bnx2x_update_min_max(bp);
2634 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2635 }
2636
2637 /* Report results to MCP */
2638 if (dcc_event)
2639 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2640 else
2641 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2642}
2643
28912902
MC
2644/* must be called under the spq lock */
2645static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2646{
2647 struct eth_spe *next_spe = bp->spq_prod_bd;
2648
2649 if (bp->spq_prod_bd == bp->spq_last_bd) {
2650 bp->spq_prod_bd = bp->spq;
2651 bp->spq_prod_idx = 0;
2652 DP(NETIF_MSG_TIMER, "end of spq\n");
2653 } else {
2654 bp->spq_prod_bd++;
2655 bp->spq_prod_idx++;
2656 }
2657 return next_spe;
2658}
2659
2660/* must be called under the spq lock */
2661static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2662{
2663 int func = BP_FUNC(bp);
2664
2665 /* Make sure that BD data is updated before writing the producer */
2666 wmb();
2667
2668 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2669 bp->spq_prod_idx);
2670 mmiowb();
2671}
2672
a2fbb9ea
ET
2673/* the slow path queue is odd since completions arrive on the fastpath ring */
2674static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2675 u32 data_hi, u32 data_lo, int common)
2676{
28912902 2677 struct eth_spe *spe;
a2fbb9ea 2678
34f80b04
EG
2679 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2680 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2681 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2682 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2683 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2684
2685#ifdef BNX2X_STOP_ON_ERROR
2686 if (unlikely(bp->panic))
2687 return -EIO;
2688#endif
2689
34f80b04 2690 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2691
2692 if (!bp->spq_left) {
2693 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2694 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2695 bnx2x_panic();
2696 return -EBUSY;
2697 }
f1410647 2698
28912902
MC
2699 spe = bnx2x_sp_get_next(bp);
2700
a2fbb9ea 2701 /* CID needs port number to be encoded int it */
28912902 2702 spe->hdr.conn_and_cmd_data =
a2fbb9ea
ET
2703 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2704 HW_CID(bp, cid)));
28912902 2705 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2706 if (common)
28912902 2707 spe->hdr.type |=
a2fbb9ea
ET
2708 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2709
28912902
MC
2710 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2711 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
2712
2713 bp->spq_left--;
2714
28912902 2715 bnx2x_sp_prod_update(bp);
34f80b04 2716 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2717 return 0;
2718}
2719
2720/* acquire split MCP access lock register */
4a37fb66 2721static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2722{
a2fbb9ea 2723 u32 i, j, val;
34f80b04 2724 int rc = 0;
a2fbb9ea
ET
2725
2726 might_sleep();
2727 i = 100;
2728 for (j = 0; j < i*10; j++) {
2729 val = (1UL << 31);
2730 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2731 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2732 if (val & (1L << 31))
2733 break;
2734
2735 msleep(5);
2736 }
a2fbb9ea 2737 if (!(val & (1L << 31))) {
19680c48 2738 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2739 rc = -EBUSY;
2740 }
2741
2742 return rc;
2743}
2744
4a37fb66
YG
2745/* release split MCP access lock register */
2746static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2747{
2748 u32 val = 0;
2749
2750 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2751}
2752
2753static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2754{
2755 struct host_def_status_block *def_sb = bp->def_status_blk;
2756 u16 rc = 0;
2757
2758 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2759 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2760 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2761 rc |= 1;
2762 }
2763 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2764 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2765 rc |= 2;
2766 }
2767 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2768 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2769 rc |= 4;
2770 }
2771 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2772 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2773 rc |= 8;
2774 }
2775 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2776 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2777 rc |= 16;
2778 }
2779 return rc;
2780}
2781
2782/*
2783 * slow path service functions
2784 */
2785
2786static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2787{
34f80b04 2788 int port = BP_PORT(bp);
5c862848
EG
2789 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2790 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2791 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2792 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2793 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2794 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2795 u32 aeu_mask;
87942b46 2796 u32 nig_mask = 0;
a2fbb9ea 2797
a2fbb9ea
ET
2798 if (bp->attn_state & asserted)
2799 BNX2X_ERR("IGU ERROR\n");
2800
3fcaf2e5
EG
2801 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2802 aeu_mask = REG_RD(bp, aeu_addr);
2803
a2fbb9ea 2804 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2805 aeu_mask, asserted);
2806 aeu_mask &= ~(asserted & 0xff);
2807 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2808
3fcaf2e5
EG
2809 REG_WR(bp, aeu_addr, aeu_mask);
2810 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2811
3fcaf2e5 2812 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2813 bp->attn_state |= asserted;
3fcaf2e5 2814 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2815
2816 if (asserted & ATTN_HARD_WIRED_MASK) {
2817 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2818
a5e9a7cf
EG
2819 bnx2x_acquire_phy_lock(bp);
2820
877e9aa4 2821 /* save nig interrupt mask */
87942b46 2822 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2823 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2824
c18487ee 2825 bnx2x_link_attn(bp);
a2fbb9ea
ET
2826
2827 /* handle unicore attn? */
2828 }
2829 if (asserted & ATTN_SW_TIMER_4_FUNC)
2830 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2831
2832 if (asserted & GPIO_2_FUNC)
2833 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2834
2835 if (asserted & GPIO_3_FUNC)
2836 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2837
2838 if (asserted & GPIO_4_FUNC)
2839 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2840
2841 if (port == 0) {
2842 if (asserted & ATTN_GENERAL_ATTN_1) {
2843 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2844 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2845 }
2846 if (asserted & ATTN_GENERAL_ATTN_2) {
2847 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2848 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2849 }
2850 if (asserted & ATTN_GENERAL_ATTN_3) {
2851 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2852 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2853 }
2854 } else {
2855 if (asserted & ATTN_GENERAL_ATTN_4) {
2856 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2857 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2858 }
2859 if (asserted & ATTN_GENERAL_ATTN_5) {
2860 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2861 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2862 }
2863 if (asserted & ATTN_GENERAL_ATTN_6) {
2864 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2865 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2866 }
2867 }
2868
2869 } /* if hardwired */
2870
5c862848
EG
2871 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2872 asserted, hc_addr);
2873 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2874
2875 /* now set back the mask */
a5e9a7cf 2876 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2877 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2878 bnx2x_release_phy_lock(bp);
2879 }
a2fbb9ea
ET
2880}
2881
fd4ef40d
EG
2882static inline void bnx2x_fan_failure(struct bnx2x *bp)
2883{
2884 int port = BP_PORT(bp);
2885
2886 /* mark the failure */
2887 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2888 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2889 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2890 bp->link_params.ext_phy_config);
2891
2892 /* log the failure */
2893 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2894 " the driver to shutdown the card to prevent permanent"
2895 " damage. Please contact Dell Support for assistance\n",
2896 bp->dev->name);
2897}
ab6ad5a4 2898
877e9aa4 2899static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2900{
34f80b04 2901 int port = BP_PORT(bp);
877e9aa4 2902 int reg_offset;
4d295db0 2903 u32 val, swap_val, swap_override;
877e9aa4 2904
34f80b04
EG
2905 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2906 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2907
34f80b04 2908 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2909
2910 val = REG_RD(bp, reg_offset);
2911 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2912 REG_WR(bp, reg_offset, val);
2913
2914 BNX2X_ERR("SPIO5 hw attention\n");
2915
fd4ef40d 2916 /* Fan failure attention */
35b19ba5
EG
2917 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2918 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2919 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2920 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2921 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2922 /* The PHY reset is controlled by GPIO 1 */
2923 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2924 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2925 break;
2926
4d295db0
EG
2927 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2928 /* The PHY reset is controlled by GPIO 1 */
2929 /* fake the port number to cancel the swap done in
2930 set_gpio() */
2931 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2932 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2933 port = (swap_val && swap_override) ^ 1;
2934 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2935 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2936 break;
2937
877e9aa4
ET
2938 default:
2939 break;
2940 }
fd4ef40d 2941 bnx2x_fan_failure(bp);
877e9aa4 2942 }
34f80b04 2943
589abe3a
EG
2944 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2945 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2946 bnx2x_acquire_phy_lock(bp);
2947 bnx2x_handle_module_detect_int(&bp->link_params);
2948 bnx2x_release_phy_lock(bp);
2949 }
2950
34f80b04
EG
2951 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2952
2953 val = REG_RD(bp, reg_offset);
2954 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2955 REG_WR(bp, reg_offset, val);
2956
2957 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2958 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2959 bnx2x_panic();
2960 }
877e9aa4
ET
2961}
2962
2963static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2964{
2965 u32 val;
2966
0626b899 2967 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2968
2969 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2970 BNX2X_ERR("DB hw attention 0x%x\n", val);
2971 /* DORQ discard attention */
2972 if (val & 0x2)
2973 BNX2X_ERR("FATAL error from DORQ\n");
2974 }
34f80b04
EG
2975
2976 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2977
2978 int port = BP_PORT(bp);
2979 int reg_offset;
2980
2981 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2982 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2983
2984 val = REG_RD(bp, reg_offset);
2985 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2986 REG_WR(bp, reg_offset, val);
2987
2988 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 2989 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
2990 bnx2x_panic();
2991 }
877e9aa4
ET
2992}
2993
2994static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2995{
2996 u32 val;
2997
2998 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2999
3000 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3001 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3002 /* CFC error attention */
3003 if (val & 0x2)
3004 BNX2X_ERR("FATAL error from CFC\n");
3005 }
3006
3007 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3008
3009 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3010 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3011 /* RQ_USDMDP_FIFO_OVERFLOW */
3012 if (val & 0x18000)
3013 BNX2X_ERR("FATAL error from PXP\n");
3014 }
34f80b04
EG
3015
3016 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3017
3018 int port = BP_PORT(bp);
3019 int reg_offset;
3020
3021 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3022 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3023
3024 val = REG_RD(bp, reg_offset);
3025 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3026 REG_WR(bp, reg_offset, val);
3027
3028 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3029 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3030 bnx2x_panic();
3031 }
877e9aa4
ET
3032}
3033
3034static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3035{
34f80b04
EG
3036 u32 val;
3037
877e9aa4
ET
3038 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3039
34f80b04
EG
3040 if (attn & BNX2X_PMF_LINK_ASSERT) {
3041 int func = BP_FUNC(bp);
3042
3043 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2691d51d
EG
3044 val = SHMEM_RD(bp, func_mb[func].drv_status);
3045 if (val & DRV_STATUS_DCC_EVENT_MASK)
3046 bnx2x_dcc_event(bp,
3047 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3048 bnx2x__link_status_update(bp);
2691d51d 3049 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3050 bnx2x_pmf_update(bp);
3051
3052 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3053
3054 BNX2X_ERR("MC assert!\n");
3055 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3056 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3057 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3058 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3059 bnx2x_panic();
3060
3061 } else if (attn & BNX2X_MCP_ASSERT) {
3062
3063 BNX2X_ERR("MCP assert!\n");
3064 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3065 bnx2x_fw_dump(bp);
877e9aa4
ET
3066
3067 } else
3068 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3069 }
3070
3071 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3072 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3073 if (attn & BNX2X_GRC_TIMEOUT) {
3074 val = CHIP_IS_E1H(bp) ?
3075 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3076 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3077 }
3078 if (attn & BNX2X_GRC_RSV) {
3079 val = CHIP_IS_E1H(bp) ?
3080 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3081 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3082 }
877e9aa4 3083 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3084 }
3085}
3086
3087static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3088{
a2fbb9ea
ET
3089 struct attn_route attn;
3090 struct attn_route group_mask;
34f80b04 3091 int port = BP_PORT(bp);
877e9aa4 3092 int index;
a2fbb9ea
ET
3093 u32 reg_addr;
3094 u32 val;
3fcaf2e5 3095 u32 aeu_mask;
a2fbb9ea
ET
3096
3097 /* need to take HW lock because MCP or other port might also
3098 try to handle this event */
4a37fb66 3099 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
3100
3101 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3102 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3103 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3104 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3105 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3106 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3107
3108 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3109 if (deasserted & (1 << index)) {
3110 group_mask = bp->attn_group[index];
3111
34f80b04
EG
3112 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3113 index, group_mask.sig[0], group_mask.sig[1],
3114 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3115
877e9aa4
ET
3116 bnx2x_attn_int_deasserted3(bp,
3117 attn.sig[3] & group_mask.sig[3]);
3118 bnx2x_attn_int_deasserted1(bp,
3119 attn.sig[1] & group_mask.sig[1]);
3120 bnx2x_attn_int_deasserted2(bp,
3121 attn.sig[2] & group_mask.sig[2]);
3122 bnx2x_attn_int_deasserted0(bp,
3123 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3124
a2fbb9ea
ET
3125 if ((attn.sig[0] & group_mask.sig[0] &
3126 HW_PRTY_ASSERT_SET_0) ||
3127 (attn.sig[1] & group_mask.sig[1] &
3128 HW_PRTY_ASSERT_SET_1) ||
3129 (attn.sig[2] & group_mask.sig[2] &
3130 HW_PRTY_ASSERT_SET_2))
6378c025 3131 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
3132 }
3133 }
3134
4a37fb66 3135 bnx2x_release_alr(bp);
a2fbb9ea 3136
5c862848 3137 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3138
3139 val = ~deasserted;
3fcaf2e5
EG
3140 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3141 val, reg_addr);
5c862848 3142 REG_WR(bp, reg_addr, val);
a2fbb9ea 3143
a2fbb9ea 3144 if (~bp->attn_state & deasserted)
3fcaf2e5 3145 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3146
3147 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3148 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3149
3fcaf2e5
EG
3150 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3151 aeu_mask = REG_RD(bp, reg_addr);
3152
3153 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3154 aeu_mask, deasserted);
3155 aeu_mask |= (deasserted & 0xff);
3156 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3157
3fcaf2e5
EG
3158 REG_WR(bp, reg_addr, aeu_mask);
3159 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3160
3161 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3162 bp->attn_state &= ~deasserted;
3163 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3164}
3165
3166static void bnx2x_attn_int(struct bnx2x *bp)
3167{
3168 /* read local copy of bits */
68d59484
EG
3169 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3170 attn_bits);
3171 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3172 attn_bits_ack);
a2fbb9ea
ET
3173 u32 attn_state = bp->attn_state;
3174
3175 /* look for changed bits */
3176 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3177 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3178
3179 DP(NETIF_MSG_HW,
3180 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3181 attn_bits, attn_ack, asserted, deasserted);
3182
3183 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3184 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3185
3186 /* handle bits that were raised */
3187 if (asserted)
3188 bnx2x_attn_int_asserted(bp, asserted);
3189
3190 if (deasserted)
3191 bnx2x_attn_int_deasserted(bp, deasserted);
3192}
3193
3194static void bnx2x_sp_task(struct work_struct *work)
3195{
1cf167f2 3196 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3197 u16 status;
3198
34f80b04 3199
a2fbb9ea
ET
3200 /* Return here if interrupt is disabled */
3201 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3202 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3203 return;
3204 }
3205
3206 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3207/* if (status == 0) */
3208/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3209
3196a88a 3210 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3211
877e9aa4
ET
3212 /* HW attentions */
3213 if (status & 0x1)
a2fbb9ea 3214 bnx2x_attn_int(bp);
a2fbb9ea 3215
68d59484 3216 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3217 IGU_INT_NOP, 1);
3218 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3219 IGU_INT_NOP, 1);
3220 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3221 IGU_INT_NOP, 1);
3222 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3223 IGU_INT_NOP, 1);
3224 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3225 IGU_INT_ENABLE, 1);
877e9aa4 3226
a2fbb9ea
ET
3227}
3228
3229static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3230{
3231 struct net_device *dev = dev_instance;
3232 struct bnx2x *bp = netdev_priv(dev);
3233
3234 /* Return here if interrupt is disabled */
3235 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3236 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3237 return IRQ_HANDLED;
3238 }
3239
8d9c5f34 3240 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3241
3242#ifdef BNX2X_STOP_ON_ERROR
3243 if (unlikely(bp->panic))
3244 return IRQ_HANDLED;
3245#endif
3246
1cf167f2 3247 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3248
3249 return IRQ_HANDLED;
3250}
3251
3252/* end of slow path */
3253
3254/* Statistics */
3255
3256/****************************************************************************
3257* Macros
3258****************************************************************************/
3259
a2fbb9ea
ET
3260/* sum[hi:lo] += add[hi:lo] */
3261#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3262 do { \
3263 s_lo += a_lo; \
f5ba6772 3264 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3265 } while (0)
3266
3267/* difference = minuend - subtrahend */
3268#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3269 do { \
bb2a0f7a
YG
3270 if (m_lo < s_lo) { \
3271 /* underflow */ \
a2fbb9ea 3272 d_hi = m_hi - s_hi; \
bb2a0f7a 3273 if (d_hi > 0) { \
6378c025 3274 /* we can 'loan' 1 */ \
a2fbb9ea
ET
3275 d_hi--; \
3276 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3277 } else { \
6378c025 3278 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3279 d_hi = 0; \
3280 d_lo = 0; \
3281 } \
bb2a0f7a
YG
3282 } else { \
3283 /* m_lo >= s_lo */ \
a2fbb9ea 3284 if (m_hi < s_hi) { \
bb2a0f7a
YG
3285 d_hi = 0; \
3286 d_lo = 0; \
3287 } else { \
6378c025 3288 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3289 d_hi = m_hi - s_hi; \
3290 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3291 } \
3292 } \
3293 } while (0)
3294
bb2a0f7a 3295#define UPDATE_STAT64(s, t) \
a2fbb9ea 3296 do { \
bb2a0f7a
YG
3297 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3298 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3299 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3300 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3301 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3302 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3303 } while (0)
3304
bb2a0f7a 3305#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3306 do { \
bb2a0f7a
YG
3307 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3308 diff.lo, new->s##_lo, old->s##_lo); \
3309 ADD_64(estats->t##_hi, diff.hi, \
3310 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3311 } while (0)
3312
3313/* sum[hi:lo] += add */
3314#define ADD_EXTEND_64(s_hi, s_lo, a) \
3315 do { \
3316 s_lo += a; \
3317 s_hi += (s_lo < a) ? 1 : 0; \
3318 } while (0)
3319
bb2a0f7a 3320#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3321 do { \
bb2a0f7a
YG
3322 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3323 pstats->mac_stx[1].s##_lo, \
3324 new->s); \
a2fbb9ea
ET
3325 } while (0)
3326
bb2a0f7a 3327#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3328 do { \
4781bfad
EG
3329 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3330 old_tclient->s = tclient->s; \
de832a55
EG
3331 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3332 } while (0)
3333
3334#define UPDATE_EXTEND_USTAT(s, t) \
3335 do { \
3336 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3337 old_uclient->s = uclient->s; \
3338 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3339 } while (0)
3340
3341#define UPDATE_EXTEND_XSTAT(s, t) \
3342 do { \
4781bfad
EG
3343 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3344 old_xclient->s = xclient->s; \
de832a55
EG
3345 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3346 } while (0)
3347
3348/* minuend -= subtrahend */
3349#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3350 do { \
3351 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3352 } while (0)
3353
3354/* minuend[hi:lo] -= subtrahend */
3355#define SUB_EXTEND_64(m_hi, m_lo, s) \
3356 do { \
3357 SUB_64(m_hi, 0, m_lo, s); \
3358 } while (0)
3359
3360#define SUB_EXTEND_USTAT(s, t) \
3361 do { \
3362 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3363 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3364 } while (0)
3365
3366/*
3367 * General service functions
3368 */
3369
3370static inline long bnx2x_hilo(u32 *hiref)
3371{
3372 u32 lo = *(hiref + 1);
3373#if (BITS_PER_LONG == 64)
3374 u32 hi = *hiref;
3375
3376 return HILO_U64(hi, lo);
3377#else
3378 return lo;
3379#endif
3380}
3381
3382/*
3383 * Init service functions
3384 */
3385
bb2a0f7a
YG
3386static void bnx2x_storm_stats_post(struct bnx2x *bp)
3387{
3388 if (!bp->stats_pending) {
3389 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3390 int i, rc;
bb2a0f7a
YG
3391
3392 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3393 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3394 for_each_queue(bp, i)
3395 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3396
3397 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3398 ((u32 *)&ramrod_data)[1],
3399 ((u32 *)&ramrod_data)[0], 0);
3400 if (rc == 0) {
3401 /* stats ramrod has it's own slot on the spq */
3402 bp->spq_left++;
3403 bp->stats_pending = 1;
3404 }
3405 }
3406}
3407
bb2a0f7a
YG
3408static void bnx2x_hw_stats_post(struct bnx2x *bp)
3409{
3410 struct dmae_command *dmae = &bp->stats_dmae;
3411 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3412
3413 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3414 if (CHIP_REV_IS_SLOW(bp))
3415 return;
bb2a0f7a
YG
3416
3417 /* loader */
3418 if (bp->executer_idx) {
3419 int loader_idx = PMF_DMAE_C(bp);
3420
3421 memset(dmae, 0, sizeof(struct dmae_command));
3422
3423 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3424 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3425 DMAE_CMD_DST_RESET |
3426#ifdef __BIG_ENDIAN
3427 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3428#else
3429 DMAE_CMD_ENDIANITY_DW_SWAP |
3430#endif
3431 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3432 DMAE_CMD_PORT_0) |
3433 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3434 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3435 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3436 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3437 sizeof(struct dmae_command) *
3438 (loader_idx + 1)) >> 2;
3439 dmae->dst_addr_hi = 0;
3440 dmae->len = sizeof(struct dmae_command) >> 2;
3441 if (CHIP_IS_E1(bp))
3442 dmae->len--;
3443 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3444 dmae->comp_addr_hi = 0;
3445 dmae->comp_val = 1;
3446
3447 *stats_comp = 0;
3448 bnx2x_post_dmae(bp, dmae, loader_idx);
3449
3450 } else if (bp->func_stx) {
3451 *stats_comp = 0;
3452 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3453 }
3454}
3455
3456static int bnx2x_stats_comp(struct bnx2x *bp)
3457{
3458 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3459 int cnt = 10;
3460
3461 might_sleep();
3462 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3463 if (!cnt) {
3464 BNX2X_ERR("timeout waiting for stats finished\n");
3465 break;
3466 }
3467 cnt--;
12469401 3468 msleep(1);
bb2a0f7a
YG
3469 }
3470 return 1;
3471}
3472
3473/*
3474 * Statistics service functions
3475 */
3476
3477static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3478{
3479 struct dmae_command *dmae;
3480 u32 opcode;
3481 int loader_idx = PMF_DMAE_C(bp);
3482 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3483
3484 /* sanity */
3485 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3486 BNX2X_ERR("BUG!\n");
3487 return;
3488 }
3489
3490 bp->executer_idx = 0;
3491
3492 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3493 DMAE_CMD_C_ENABLE |
3494 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3495#ifdef __BIG_ENDIAN
3496 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3497#else
3498 DMAE_CMD_ENDIANITY_DW_SWAP |
3499#endif
3500 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3501 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3502
3503 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3504 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3505 dmae->src_addr_lo = bp->port.port_stx >> 2;
3506 dmae->src_addr_hi = 0;
3507 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3508 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3509 dmae->len = DMAE_LEN32_RD_MAX;
3510 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3511 dmae->comp_addr_hi = 0;
3512 dmae->comp_val = 1;
3513
3514 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3515 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3516 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3517 dmae->src_addr_hi = 0;
7a9b2557
VZ
3518 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3519 DMAE_LEN32_RD_MAX * 4);
3520 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3521 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3522 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3523 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3524 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3525 dmae->comp_val = DMAE_COMP_VAL;
3526
3527 *stats_comp = 0;
3528 bnx2x_hw_stats_post(bp);
3529 bnx2x_stats_comp(bp);
3530}
3531
3532static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3533{
3534 struct dmae_command *dmae;
34f80b04 3535 int port = BP_PORT(bp);
bb2a0f7a 3536 int vn = BP_E1HVN(bp);
a2fbb9ea 3537 u32 opcode;
bb2a0f7a 3538 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3539 u32 mac_addr;
bb2a0f7a
YG
3540 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3541
3542 /* sanity */
3543 if (!bp->link_vars.link_up || !bp->port.pmf) {
3544 BNX2X_ERR("BUG!\n");
3545 return;
3546 }
a2fbb9ea
ET
3547
3548 bp->executer_idx = 0;
bb2a0f7a
YG
3549
3550 /* MCP */
3551 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3552 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3553 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3554#ifdef __BIG_ENDIAN
bb2a0f7a 3555 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3556#else
bb2a0f7a 3557 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3558#endif
bb2a0f7a
YG
3559 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3560 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3561
bb2a0f7a 3562 if (bp->port.port_stx) {
a2fbb9ea
ET
3563
3564 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3565 dmae->opcode = opcode;
bb2a0f7a
YG
3566 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3567 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3568 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3569 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3570 dmae->len = sizeof(struct host_port_stats) >> 2;
3571 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3572 dmae->comp_addr_hi = 0;
3573 dmae->comp_val = 1;
a2fbb9ea
ET
3574 }
3575
bb2a0f7a
YG
3576 if (bp->func_stx) {
3577
3578 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3579 dmae->opcode = opcode;
3580 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3581 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3582 dmae->dst_addr_lo = bp->func_stx >> 2;
3583 dmae->dst_addr_hi = 0;
3584 dmae->len = sizeof(struct host_func_stats) >> 2;
3585 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3586 dmae->comp_addr_hi = 0;
3587 dmae->comp_val = 1;
a2fbb9ea
ET
3588 }
3589
bb2a0f7a 3590 /* MAC */
a2fbb9ea
ET
3591 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3592 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3593 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3594#ifdef __BIG_ENDIAN
3595 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3596#else
3597 DMAE_CMD_ENDIANITY_DW_SWAP |
3598#endif
bb2a0f7a
YG
3599 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3600 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3601
c18487ee 3602 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3603
3604 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3605 NIG_REG_INGRESS_BMAC0_MEM);
3606
3607 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3608 BIGMAC_REGISTER_TX_STAT_GTBYT */
3609 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3610 dmae->opcode = opcode;
3611 dmae->src_addr_lo = (mac_addr +
3612 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3613 dmae->src_addr_hi = 0;
3614 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3615 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3616 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3617 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3618 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3619 dmae->comp_addr_hi = 0;
3620 dmae->comp_val = 1;
3621
3622 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3623 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3624 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3625 dmae->opcode = opcode;
3626 dmae->src_addr_lo = (mac_addr +
3627 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3628 dmae->src_addr_hi = 0;
3629 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3630 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3631 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3632 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3633 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3634 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3635 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3636 dmae->comp_addr_hi = 0;
3637 dmae->comp_val = 1;
3638
c18487ee 3639 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3640
3641 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3642
3643 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3644 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3645 dmae->opcode = opcode;
3646 dmae->src_addr_lo = (mac_addr +
3647 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3648 dmae->src_addr_hi = 0;
3649 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3650 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3651 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3652 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3653 dmae->comp_addr_hi = 0;
3654 dmae->comp_val = 1;
3655
3656 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3657 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3658 dmae->opcode = opcode;
3659 dmae->src_addr_lo = (mac_addr +
3660 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3661 dmae->src_addr_hi = 0;
3662 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3663 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3664 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3665 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3666 dmae->len = 1;
3667 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3668 dmae->comp_addr_hi = 0;
3669 dmae->comp_val = 1;
3670
3671 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3672 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3673 dmae->opcode = opcode;
3674 dmae->src_addr_lo = (mac_addr +
3675 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3676 dmae->src_addr_hi = 0;
3677 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3678 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3679 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3680 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3681 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3682 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3683 dmae->comp_addr_hi = 0;
3684 dmae->comp_val = 1;
3685 }
3686
3687 /* NIG */
bb2a0f7a
YG
3688 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3689 dmae->opcode = opcode;
3690 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3691 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3692 dmae->src_addr_hi = 0;
3693 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3694 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3695 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3696 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3697 dmae->comp_addr_hi = 0;
3698 dmae->comp_val = 1;
3699
3700 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3701 dmae->opcode = opcode;
3702 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3703 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3704 dmae->src_addr_hi = 0;
3705 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3706 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3707 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3708 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3709 dmae->len = (2*sizeof(u32)) >> 2;
3710 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3711 dmae->comp_addr_hi = 0;
3712 dmae->comp_val = 1;
3713
a2fbb9ea
ET
3714 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3715 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3716 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3717 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3718#ifdef __BIG_ENDIAN
3719 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3720#else
3721 DMAE_CMD_ENDIANITY_DW_SWAP |
3722#endif
bb2a0f7a
YG
3723 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3724 (vn << DMAE_CMD_E1HVN_SHIFT));
3725 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3726 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3727 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3728 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3729 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3730 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3731 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3732 dmae->len = (2*sizeof(u32)) >> 2;
3733 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3734 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3735 dmae->comp_val = DMAE_COMP_VAL;
3736
3737 *stats_comp = 0;
a2fbb9ea
ET
3738}
3739
bb2a0f7a 3740static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3741{
bb2a0f7a
YG
3742 struct dmae_command *dmae = &bp->stats_dmae;
3743 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3744
bb2a0f7a
YG
3745 /* sanity */
3746 if (!bp->func_stx) {
3747 BNX2X_ERR("BUG!\n");
3748 return;
3749 }
a2fbb9ea 3750
bb2a0f7a
YG
3751 bp->executer_idx = 0;
3752 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3753
bb2a0f7a
YG
3754 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3755 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3756 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3757#ifdef __BIG_ENDIAN
3758 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3759#else
3760 DMAE_CMD_ENDIANITY_DW_SWAP |
3761#endif
3762 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3763 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3764 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3765 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3766 dmae->dst_addr_lo = bp->func_stx >> 2;
3767 dmae->dst_addr_hi = 0;
3768 dmae->len = sizeof(struct host_func_stats) >> 2;
3769 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3770 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3771 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3772
bb2a0f7a
YG
3773 *stats_comp = 0;
3774}
a2fbb9ea 3775
bb2a0f7a
YG
3776static void bnx2x_stats_start(struct bnx2x *bp)
3777{
3778 if (bp->port.pmf)
3779 bnx2x_port_stats_init(bp);
3780
3781 else if (bp->func_stx)
3782 bnx2x_func_stats_init(bp);
3783
3784 bnx2x_hw_stats_post(bp);
3785 bnx2x_storm_stats_post(bp);
3786}
3787
3788static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3789{
3790 bnx2x_stats_comp(bp);
3791 bnx2x_stats_pmf_update(bp);
3792 bnx2x_stats_start(bp);
3793}
3794
3795static void bnx2x_stats_restart(struct bnx2x *bp)
3796{
3797 bnx2x_stats_comp(bp);
3798 bnx2x_stats_start(bp);
3799}
3800
3801static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3802{
3803 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3804 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3805 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3806 struct {
3807 u32 lo;
3808 u32 hi;
3809 } diff;
bb2a0f7a
YG
3810
3811 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3812 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3813 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3814 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3815 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3816 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3817 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3818 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3819 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
bb2a0f7a
YG
3820 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3821 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3822 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3823 UPDATE_STAT64(tx_stat_gt127,
3824 tx_stat_etherstatspkts65octetsto127octets);
3825 UPDATE_STAT64(tx_stat_gt255,
3826 tx_stat_etherstatspkts128octetsto255octets);
3827 UPDATE_STAT64(tx_stat_gt511,
3828 tx_stat_etherstatspkts256octetsto511octets);
3829 UPDATE_STAT64(tx_stat_gt1023,
3830 tx_stat_etherstatspkts512octetsto1023octets);
3831 UPDATE_STAT64(tx_stat_gt1518,
3832 tx_stat_etherstatspkts1024octetsto1522octets);
3833 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3834 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3835 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3836 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3837 UPDATE_STAT64(tx_stat_gterr,
3838 tx_stat_dot3statsinternalmactransmiterrors);
3839 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
de832a55
EG
3840
3841 estats->pause_frames_received_hi =
3842 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3843 estats->pause_frames_received_lo =
3844 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3845
3846 estats->pause_frames_sent_hi =
3847 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3848 estats->pause_frames_sent_lo =
3849 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
bb2a0f7a
YG
3850}
3851
3852static void bnx2x_emac_stats_update(struct bnx2x *bp)
3853{
3854 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3855 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3856 struct bnx2x_eth_stats *estats = &bp->eth_stats;
bb2a0f7a
YG
3857
3858 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3859 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3860 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3861 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3862 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3863 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3864 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3865 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3866 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3867 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3868 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3869 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3870 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3871 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3872 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3873 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3874 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3875 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3876 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3877 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3878 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3879 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3880 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3881 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3882 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3883 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3884 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3885 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3886 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3887 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3888 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
de832a55
EG
3889
3890 estats->pause_frames_received_hi =
3891 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3892 estats->pause_frames_received_lo =
3893 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3894 ADD_64(estats->pause_frames_received_hi,
3895 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3896 estats->pause_frames_received_lo,
3897 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3898
3899 estats->pause_frames_sent_hi =
3900 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3901 estats->pause_frames_sent_lo =
3902 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3903 ADD_64(estats->pause_frames_sent_hi,
3904 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3905 estats->pause_frames_sent_lo,
3906 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
bb2a0f7a
YG
3907}
3908
3909static int bnx2x_hw_stats_update(struct bnx2x *bp)
3910{
3911 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3912 struct nig_stats *old = &(bp->port.old_nig_stats);
3913 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3914 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3915 struct {
3916 u32 lo;
3917 u32 hi;
3918 } diff;
de832a55 3919 u32 nig_timer_max;
bb2a0f7a
YG
3920
3921 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3922 bnx2x_bmac_stats_update(bp);
3923
3924 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3925 bnx2x_emac_stats_update(bp);
3926
3927 else { /* unreached */
c3eefaf6 3928 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
bb2a0f7a
YG
3929 return -1;
3930 }
a2fbb9ea 3931
bb2a0f7a
YG
3932 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3933 new->brb_discard - old->brb_discard);
66e855f3
YG
3934 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3935 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3936
bb2a0f7a
YG
3937 UPDATE_STAT64_NIG(egress_mac_pkt0,
3938 etherstatspkts1024octetsto1522octets);
3939 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3940
bb2a0f7a 3941 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3942
bb2a0f7a
YG
3943 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3944 sizeof(struct mac_stx));
3945 estats->brb_drop_hi = pstats->brb_drop_hi;
3946 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3947
bb2a0f7a 3948 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3949
de832a55
EG
3950 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3951 if (nig_timer_max != estats->nig_timer_max) {
3952 estats->nig_timer_max = nig_timer_max;
3953 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3954 }
3955
bb2a0f7a 3956 return 0;
a2fbb9ea
ET
3957}
3958
bb2a0f7a 3959static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3960{
3961 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3962 struct tstorm_per_port_stats *tport =
de832a55 3963 &stats->tstorm_common.port_statistics;
bb2a0f7a
YG
3964 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3965 struct bnx2x_eth_stats *estats = &bp->eth_stats;
de832a55
EG
3966 int i;
3967
6fe49bb9
EG
3968 memcpy(&(fstats->total_bytes_received_hi),
3969 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
de832a55
EG
3970 sizeof(struct host_func_stats) - 2*sizeof(u32));
3971 estats->error_bytes_received_hi = 0;
3972 estats->error_bytes_received_lo = 0;
3973 estats->etherstatsoverrsizepkts_hi = 0;
3974 estats->etherstatsoverrsizepkts_lo = 0;
3975 estats->no_buff_discard_hi = 0;
3976 estats->no_buff_discard_lo = 0;
a2fbb9ea 3977
ca00392c 3978 for_each_rx_queue(bp, i) {
de832a55
EG
3979 struct bnx2x_fastpath *fp = &bp->fp[i];
3980 int cl_id = fp->cl_id;
3981 struct tstorm_per_client_stats *tclient =
3982 &stats->tstorm_common.client_statistics[cl_id];
3983 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3984 struct ustorm_per_client_stats *uclient =
3985 &stats->ustorm_common.client_statistics[cl_id];
3986 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3987 struct xstorm_per_client_stats *xclient =
3988 &stats->xstorm_common.client_statistics[cl_id];
3989 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3990 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3991 u32 diff;
3992
3993 /* are storm stats valid? */
3994 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 3995 bp->stats_counter) {
de832a55
EG
3996 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3997 " xstorm counter (%d) != stats_counter (%d)\n",
3998 i, xclient->stats_counter, bp->stats_counter);
3999 return -1;
4000 }
4001 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 4002 bp->stats_counter) {
de832a55
EG
4003 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4004 " tstorm counter (%d) != stats_counter (%d)\n",
4005 i, tclient->stats_counter, bp->stats_counter);
4006 return -2;
4007 }
4008 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4009 bp->stats_counter) {
4010 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4011 " ustorm counter (%d) != stats_counter (%d)\n",
4012 i, uclient->stats_counter, bp->stats_counter);
4013 return -4;
4014 }
a2fbb9ea 4015
de832a55 4016 qstats->total_bytes_received_hi =
ca00392c 4017 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 4018 qstats->total_bytes_received_lo =
ca00392c
EG
4019 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4020
4021 ADD_64(qstats->total_bytes_received_hi,
4022 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4023 qstats->total_bytes_received_lo,
4024 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4025
4026 ADD_64(qstats->total_bytes_received_hi,
4027 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4028 qstats->total_bytes_received_lo,
4029 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4030
4031 qstats->valid_bytes_received_hi =
4032 qstats->total_bytes_received_hi;
de832a55 4033 qstats->valid_bytes_received_lo =
ca00392c 4034 qstats->total_bytes_received_lo;
bb2a0f7a 4035
de832a55 4036 qstats->error_bytes_received_hi =
bb2a0f7a 4037 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 4038 qstats->error_bytes_received_lo =
bb2a0f7a 4039 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4040
de832a55
EG
4041 ADD_64(qstats->total_bytes_received_hi,
4042 qstats->error_bytes_received_hi,
4043 qstats->total_bytes_received_lo,
4044 qstats->error_bytes_received_lo);
4045
4046 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4047 total_unicast_packets_received);
4048 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4049 total_multicast_packets_received);
4050 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4051 total_broadcast_packets_received);
4052 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4053 etherstatsoverrsizepkts);
4054 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4055
4056 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4057 total_unicast_packets_received);
4058 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4059 total_multicast_packets_received);
4060 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4061 total_broadcast_packets_received);
4062 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4063 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4064 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4065
4066 qstats->total_bytes_transmitted_hi =
ca00392c 4067 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4068 qstats->total_bytes_transmitted_lo =
ca00392c
EG
4069 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4070
4071 ADD_64(qstats->total_bytes_transmitted_hi,
4072 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4073 qstats->total_bytes_transmitted_lo,
4074 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4075
4076 ADD_64(qstats->total_bytes_transmitted_hi,
4077 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4078 qstats->total_bytes_transmitted_lo,
4079 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4080
de832a55
EG
4081 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4082 total_unicast_packets_transmitted);
4083 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4084 total_multicast_packets_transmitted);
4085 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4086 total_broadcast_packets_transmitted);
4087
4088 old_tclient->checksum_discard = tclient->checksum_discard;
4089 old_tclient->ttl0_discard = tclient->ttl0_discard;
4090
4091 ADD_64(fstats->total_bytes_received_hi,
4092 qstats->total_bytes_received_hi,
4093 fstats->total_bytes_received_lo,
4094 qstats->total_bytes_received_lo);
4095 ADD_64(fstats->total_bytes_transmitted_hi,
4096 qstats->total_bytes_transmitted_hi,
4097 fstats->total_bytes_transmitted_lo,
4098 qstats->total_bytes_transmitted_lo);
4099 ADD_64(fstats->total_unicast_packets_received_hi,
4100 qstats->total_unicast_packets_received_hi,
4101 fstats->total_unicast_packets_received_lo,
4102 qstats->total_unicast_packets_received_lo);
4103 ADD_64(fstats->total_multicast_packets_received_hi,
4104 qstats->total_multicast_packets_received_hi,
4105 fstats->total_multicast_packets_received_lo,
4106 qstats->total_multicast_packets_received_lo);
4107 ADD_64(fstats->total_broadcast_packets_received_hi,
4108 qstats->total_broadcast_packets_received_hi,
4109 fstats->total_broadcast_packets_received_lo,
4110 qstats->total_broadcast_packets_received_lo);
4111 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4112 qstats->total_unicast_packets_transmitted_hi,
4113 fstats->total_unicast_packets_transmitted_lo,
4114 qstats->total_unicast_packets_transmitted_lo);
4115 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4116 qstats->total_multicast_packets_transmitted_hi,
4117 fstats->total_multicast_packets_transmitted_lo,
4118 qstats->total_multicast_packets_transmitted_lo);
4119 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4120 qstats->total_broadcast_packets_transmitted_hi,
4121 fstats->total_broadcast_packets_transmitted_lo,
4122 qstats->total_broadcast_packets_transmitted_lo);
4123 ADD_64(fstats->valid_bytes_received_hi,
4124 qstats->valid_bytes_received_hi,
4125 fstats->valid_bytes_received_lo,
4126 qstats->valid_bytes_received_lo);
4127
4128 ADD_64(estats->error_bytes_received_hi,
4129 qstats->error_bytes_received_hi,
4130 estats->error_bytes_received_lo,
4131 qstats->error_bytes_received_lo);
4132 ADD_64(estats->etherstatsoverrsizepkts_hi,
4133 qstats->etherstatsoverrsizepkts_hi,
4134 estats->etherstatsoverrsizepkts_lo,
4135 qstats->etherstatsoverrsizepkts_lo);
4136 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4137 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4138 }
4139
4140 ADD_64(fstats->total_bytes_received_hi,
4141 estats->rx_stat_ifhcinbadoctets_hi,
4142 fstats->total_bytes_received_lo,
4143 estats->rx_stat_ifhcinbadoctets_lo);
bb2a0f7a
YG
4144
4145 memcpy(estats, &(fstats->total_bytes_received_hi),
4146 sizeof(struct host_func_stats) - 2*sizeof(u32));
4147
de832a55
EG
4148 ADD_64(estats->etherstatsoverrsizepkts_hi,
4149 estats->rx_stat_dot3statsframestoolong_hi,
4150 estats->etherstatsoverrsizepkts_lo,
4151 estats->rx_stat_dot3statsframestoolong_lo);
4152 ADD_64(estats->error_bytes_received_hi,
4153 estats->rx_stat_ifhcinbadoctets_hi,
4154 estats->error_bytes_received_lo,
4155 estats->rx_stat_ifhcinbadoctets_lo);
4156
4157 if (bp->port.pmf) {
4158 estats->mac_filter_discard =
4159 le32_to_cpu(tport->mac_filter_discard);
4160 estats->xxoverflow_discard =
4161 le32_to_cpu(tport->xxoverflow_discard);
4162 estats->brb_truncate_discard =
bb2a0f7a 4163 le32_to_cpu(tport->brb_truncate_discard);
de832a55
EG
4164 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4165 }
bb2a0f7a
YG
4166
4167 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4168
de832a55
EG
4169 bp->stats_pending = 0;
4170
a2fbb9ea
ET
4171 return 0;
4172}
4173
bb2a0f7a 4174static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4175{
bb2a0f7a 4176 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4177 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4178 int i;
a2fbb9ea
ET
4179
4180 nstats->rx_packets =
4181 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4182 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4183 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4184
4185 nstats->tx_packets =
4186 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4187 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4188 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4189
de832a55 4190 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4191
0e39e645 4192 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4193
de832a55 4194 nstats->rx_dropped = estats->mac_discard;
ca00392c 4195 for_each_rx_queue(bp, i)
de832a55
EG
4196 nstats->rx_dropped +=
4197 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4198
a2fbb9ea
ET
4199 nstats->tx_dropped = 0;
4200
4201 nstats->multicast =
de832a55 4202 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4203
bb2a0f7a 4204 nstats->collisions =
de832a55 4205 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
bb2a0f7a
YG
4206
4207 nstats->rx_length_errors =
de832a55
EG
4208 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4209 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4210 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4211 bnx2x_hilo(&estats->brb_truncate_hi);
4212 nstats->rx_crc_errors =
4213 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4214 nstats->rx_frame_errors =
4215 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4216 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
a2fbb9ea
ET
4217 nstats->rx_missed_errors = estats->xxoverflow_discard;
4218
4219 nstats->rx_errors = nstats->rx_length_errors +
4220 nstats->rx_over_errors +
4221 nstats->rx_crc_errors +
4222 nstats->rx_frame_errors +
0e39e645
ET
4223 nstats->rx_fifo_errors +
4224 nstats->rx_missed_errors;
a2fbb9ea 4225
bb2a0f7a 4226 nstats->tx_aborted_errors =
de832a55
EG
4227 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4228 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4229 nstats->tx_carrier_errors =
4230 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
a2fbb9ea
ET
4231 nstats->tx_fifo_errors = 0;
4232 nstats->tx_heartbeat_errors = 0;
4233 nstats->tx_window_errors = 0;
4234
4235 nstats->tx_errors = nstats->tx_aborted_errors +
de832a55
EG
4236 nstats->tx_carrier_errors +
4237 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4238}
4239
4240static void bnx2x_drv_stats_update(struct bnx2x *bp)
4241{
4242 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4243 int i;
4244
4245 estats->driver_xoff = 0;
4246 estats->rx_err_discard_pkt = 0;
4247 estats->rx_skb_alloc_failed = 0;
4248 estats->hw_csum_err = 0;
ca00392c 4249 for_each_rx_queue(bp, i) {
de832a55
EG
4250 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4251
4252 estats->driver_xoff += qstats->driver_xoff;
4253 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4254 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4255 estats->hw_csum_err += qstats->hw_csum_err;
4256 }
a2fbb9ea
ET
4257}
4258
bb2a0f7a 4259static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4260{
bb2a0f7a 4261 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4262
bb2a0f7a
YG
4263 if (*stats_comp != DMAE_COMP_VAL)
4264 return;
4265
4266 if (bp->port.pmf)
de832a55 4267 bnx2x_hw_stats_update(bp);
a2fbb9ea 4268
de832a55
EG
4269 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4270 BNX2X_ERR("storm stats were not updated for 3 times\n");
4271 bnx2x_panic();
4272 return;
a2fbb9ea
ET
4273 }
4274
de832a55
EG
4275 bnx2x_net_stats_update(bp);
4276 bnx2x_drv_stats_update(bp);
4277
a2fbb9ea 4278 if (bp->msglevel & NETIF_MSG_TIMER) {
ca00392c
EG
4279 struct bnx2x_fastpath *fp0_rx = bp->fp;
4280 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
de832a55
EG
4281 struct tstorm_per_client_stats *old_tclient =
4282 &bp->fp->old_tclient;
4283 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4284 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4285 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4286 int i;
a2fbb9ea
ET
4287
4288 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4289 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4290 " tx pkt (%lx)\n",
ca00392c
EG
4291 bnx2x_tx_avail(fp0_tx),
4292 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
4293 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4294 " rx pkt (%lx)\n",
ca00392c
EG
4295 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4296 fp0_rx->rx_comp_cons),
4297 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
de832a55
EG
4298 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4299 "brb truncate %u\n",
4300 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4301 qstats->driver_xoff,
4302 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4303 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4304 "packets_too_big_discard %lu no_buff_discard %lu "
a2fbb9ea
ET
4305 "mac_discard %u mac_filter_discard %u "
4306 "xxovrflow_discard %u brb_truncate_discard %u "
4307 "ttl0_discard %u\n",
4781bfad 4308 le32_to_cpu(old_tclient->checksum_discard),
de832a55
EG
4309 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4310 bnx2x_hilo(&qstats->no_buff_discard_hi),
4311 estats->mac_discard, estats->mac_filter_discard,
4312 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4313 le32_to_cpu(old_tclient->ttl0_discard));
a2fbb9ea
ET
4314
4315 for_each_queue(bp, i) {
4316 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4317 bnx2x_fp(bp, i, tx_pkt),
4318 bnx2x_fp(bp, i, rx_pkt),
4319 bnx2x_fp(bp, i, rx_calls));
4320 }
4321 }
4322
bb2a0f7a
YG
4323 bnx2x_hw_stats_post(bp);
4324 bnx2x_storm_stats_post(bp);
4325}
a2fbb9ea 4326
bb2a0f7a
YG
4327static void bnx2x_port_stats_stop(struct bnx2x *bp)
4328{
4329 struct dmae_command *dmae;
4330 u32 opcode;
4331 int loader_idx = PMF_DMAE_C(bp);
4332 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4333
bb2a0f7a 4334 bp->executer_idx = 0;
a2fbb9ea 4335
bb2a0f7a
YG
4336 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4337 DMAE_CMD_C_ENABLE |
4338 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4339#ifdef __BIG_ENDIAN
bb2a0f7a 4340 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4341#else
bb2a0f7a 4342 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4343#endif
bb2a0f7a
YG
4344 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4345 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4346
4347 if (bp->port.port_stx) {
4348
4349 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4350 if (bp->func_stx)
4351 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4352 else
4353 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4354 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4355 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4356 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4357 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
4358 dmae->len = sizeof(struct host_port_stats) >> 2;
4359 if (bp->func_stx) {
4360 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4361 dmae->comp_addr_hi = 0;
4362 dmae->comp_val = 1;
4363 } else {
4364 dmae->comp_addr_lo =
4365 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4366 dmae->comp_addr_hi =
4367 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4368 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4369
bb2a0f7a
YG
4370 *stats_comp = 0;
4371 }
a2fbb9ea
ET
4372 }
4373
bb2a0f7a
YG
4374 if (bp->func_stx) {
4375
4376 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4377 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4378 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4379 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4380 dmae->dst_addr_lo = bp->func_stx >> 2;
4381 dmae->dst_addr_hi = 0;
4382 dmae->len = sizeof(struct host_func_stats) >> 2;
4383 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4384 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4385 dmae->comp_val = DMAE_COMP_VAL;
4386
4387 *stats_comp = 0;
a2fbb9ea 4388 }
bb2a0f7a
YG
4389}
4390
4391static void bnx2x_stats_stop(struct bnx2x *bp)
4392{
4393 int update = 0;
4394
4395 bnx2x_stats_comp(bp);
4396
4397 if (bp->port.pmf)
4398 update = (bnx2x_hw_stats_update(bp) == 0);
4399
4400 update |= (bnx2x_storm_stats_update(bp) == 0);
4401
4402 if (update) {
4403 bnx2x_net_stats_update(bp);
a2fbb9ea 4404
bb2a0f7a
YG
4405 if (bp->port.pmf)
4406 bnx2x_port_stats_stop(bp);
4407
4408 bnx2x_hw_stats_post(bp);
4409 bnx2x_stats_comp(bp);
a2fbb9ea
ET
4410 }
4411}
4412
bb2a0f7a
YG
4413static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4414{
4415}
4416
4417static const struct {
4418 void (*action)(struct bnx2x *bp);
4419 enum bnx2x_stats_state next_state;
4420} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4421/* state event */
4422{
4423/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4424/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4425/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4426/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4427},
4428{
4429/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4430/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4431/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4432/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4433}
4434};
4435
4436static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4437{
4438 enum bnx2x_stats_state state = bp->stats_state;
4439
4440 bnx2x_stats_stm[state][event].action(bp);
4441 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4442
8924665a
EG
4443 /* Make sure the state has been "changed" */
4444 smp_wmb();
4445
bb2a0f7a
YG
4446 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4447 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4448 state, event, bp->stats_state);
4449}
4450
6fe49bb9
EG
4451static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4452{
4453 struct dmae_command *dmae;
4454 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4455
4456 /* sanity */
4457 if (!bp->port.pmf || !bp->port.port_stx) {
4458 BNX2X_ERR("BUG!\n");
4459 return;
4460 }
4461
4462 bp->executer_idx = 0;
4463
4464 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4465 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4466 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4467 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4468#ifdef __BIG_ENDIAN
4469 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4470#else
4471 DMAE_CMD_ENDIANITY_DW_SWAP |
4472#endif
4473 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4474 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4475 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4476 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4477 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4478 dmae->dst_addr_hi = 0;
4479 dmae->len = sizeof(struct host_port_stats) >> 2;
4480 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4481 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4482 dmae->comp_val = DMAE_COMP_VAL;
4483
4484 *stats_comp = 0;
4485 bnx2x_hw_stats_post(bp);
4486 bnx2x_stats_comp(bp);
4487}
4488
4489static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4490{
4491 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4492 int port = BP_PORT(bp);
4493 int func;
4494 u32 func_stx;
4495
4496 /* sanity */
4497 if (!bp->port.pmf || !bp->func_stx) {
4498 BNX2X_ERR("BUG!\n");
4499 return;
4500 }
4501
4502 /* save our func_stx */
4503 func_stx = bp->func_stx;
4504
4505 for (vn = VN_0; vn < vn_max; vn++) {
4506 func = 2*vn + port;
4507
4508 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4509 bnx2x_func_stats_init(bp);
4510 bnx2x_hw_stats_post(bp);
4511 bnx2x_stats_comp(bp);
4512 }
4513
4514 /* restore our func_stx */
4515 bp->func_stx = func_stx;
4516}
4517
4518static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4519{
4520 struct dmae_command *dmae = &bp->stats_dmae;
4521 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4522
4523 /* sanity */
4524 if (!bp->func_stx) {
4525 BNX2X_ERR("BUG!\n");
4526 return;
4527 }
4528
4529 bp->executer_idx = 0;
4530 memset(dmae, 0, sizeof(struct dmae_command));
4531
4532 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4533 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4534 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4535#ifdef __BIG_ENDIAN
4536 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4537#else
4538 DMAE_CMD_ENDIANITY_DW_SWAP |
4539#endif
4540 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4541 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4542 dmae->src_addr_lo = bp->func_stx >> 2;
4543 dmae->src_addr_hi = 0;
4544 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4545 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4546 dmae->len = sizeof(struct host_func_stats) >> 2;
4547 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4548 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4549 dmae->comp_val = DMAE_COMP_VAL;
4550
4551 *stats_comp = 0;
4552 bnx2x_hw_stats_post(bp);
4553 bnx2x_stats_comp(bp);
4554}
4555
4556static void bnx2x_stats_init(struct bnx2x *bp)
4557{
4558 int port = BP_PORT(bp);
4559 int func = BP_FUNC(bp);
4560 int i;
4561
4562 bp->stats_pending = 0;
4563 bp->executer_idx = 0;
4564 bp->stats_counter = 0;
4565
4566 /* port and func stats for management */
4567 if (!BP_NOMCP(bp)) {
4568 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4569 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4570
4571 } else {
4572 bp->port.port_stx = 0;
4573 bp->func_stx = 0;
4574 }
4575 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4576 bp->port.port_stx, bp->func_stx);
4577
4578 /* port stats */
4579 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4580 bp->port.old_nig_stats.brb_discard =
4581 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4582 bp->port.old_nig_stats.brb_truncate =
4583 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4584 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4585 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4586 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4587 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4588
4589 /* function stats */
4590 for_each_queue(bp, i) {
4591 struct bnx2x_fastpath *fp = &bp->fp[i];
4592
4593 memset(&fp->old_tclient, 0,
4594 sizeof(struct tstorm_per_client_stats));
4595 memset(&fp->old_uclient, 0,
4596 sizeof(struct ustorm_per_client_stats));
4597 memset(&fp->old_xclient, 0,
4598 sizeof(struct xstorm_per_client_stats));
4599 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4600 }
4601
4602 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4603 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4604
4605 bp->stats_state = STATS_STATE_DISABLED;
4606
4607 if (bp->port.pmf) {
4608 if (bp->port.port_stx)
4609 bnx2x_port_stats_base_init(bp);
4610
4611 if (bp->func_stx)
4612 bnx2x_func_stats_base_init(bp);
4613
4614 } else if (bp->func_stx)
4615 bnx2x_func_stats_base_update(bp);
4616}
4617
a2fbb9ea
ET
4618static void bnx2x_timer(unsigned long data)
4619{
4620 struct bnx2x *bp = (struct bnx2x *) data;
4621
4622 if (!netif_running(bp->dev))
4623 return;
4624
4625 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4626 goto timer_restart;
a2fbb9ea
ET
4627
4628 if (poll) {
4629 struct bnx2x_fastpath *fp = &bp->fp[0];
4630 int rc;
4631
7961f791 4632 bnx2x_tx_int(fp);
a2fbb9ea
ET
4633 rc = bnx2x_rx_int(fp, 1000);
4634 }
4635
34f80b04
EG
4636 if (!BP_NOMCP(bp)) {
4637 int func = BP_FUNC(bp);
a2fbb9ea
ET
4638 u32 drv_pulse;
4639 u32 mcp_pulse;
4640
4641 ++bp->fw_drv_pulse_wr_seq;
4642 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4643 /* TBD - add SYSTEM_TIME */
4644 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4645 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4646
34f80b04 4647 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
4648 MCP_PULSE_SEQ_MASK);
4649 /* The delta between driver pulse and mcp response
4650 * should be 1 (before mcp response) or 0 (after mcp response)
4651 */
4652 if ((drv_pulse != mcp_pulse) &&
4653 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4654 /* someone lost a heartbeat... */
4655 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4656 drv_pulse, mcp_pulse);
4657 }
4658 }
4659
bb2a0f7a
YG
4660 if ((bp->state == BNX2X_STATE_OPEN) ||
4661 (bp->state == BNX2X_STATE_DISABLED))
4662 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4663
f1410647 4664timer_restart:
a2fbb9ea
ET
4665 mod_timer(&bp->timer, jiffies + bp->current_interval);
4666}
4667
4668/* end of Statistics */
4669
4670/* nic init */
4671
4672/*
4673 * nic init service functions
4674 */
4675
34f80b04 4676static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4677{
34f80b04
EG
4678 int port = BP_PORT(bp);
4679
ca00392c
EG
4680 /* "CSTORM" */
4681 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4682 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4683 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4684 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4685 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4686 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
34f80b04
EG
4687}
4688
5c862848
EG
4689static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4690 dma_addr_t mapping, int sb_id)
34f80b04
EG
4691{
4692 int port = BP_PORT(bp);
bb2a0f7a 4693 int func = BP_FUNC(bp);
a2fbb9ea 4694 int index;
34f80b04 4695 u64 section;
a2fbb9ea
ET
4696
4697 /* USTORM */
4698 section = ((u64)mapping) + offsetof(struct host_status_block,
4699 u_status_block);
34f80b04 4700 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 4701
ca00392c
EG
4702 REG_WR(bp, BAR_CSTRORM_INTMEM +
4703 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4704 REG_WR(bp, BAR_CSTRORM_INTMEM +
4705 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4706 U64_HI(section));
ca00392c
EG
4707 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4708 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4709
4710 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
ca00392c
EG
4711 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4712 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4713
4714 /* CSTORM */
4715 section = ((u64)mapping) + offsetof(struct host_status_block,
4716 c_status_block);
34f80b04 4717 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4718
4719 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4720 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4721 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4722 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4723 U64_HI(section));
7a9b2557 4724 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 4725 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4726
4727 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4728 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4729 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
34f80b04
EG
4730
4731 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4732}
4733
4734static void bnx2x_zero_def_sb(struct bnx2x *bp)
4735{
4736 int func = BP_FUNC(bp);
a2fbb9ea 4737
ca00392c 4738 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
490c3c9b
EG
4739 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4740 sizeof(struct tstorm_def_status_block)/4);
ca00392c
EG
4741 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4742 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4743 sizeof(struct cstorm_def_status_block_u)/4);
4744 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4745 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4746 sizeof(struct cstorm_def_status_block_c)/4);
4747 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
34f80b04
EG
4748 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4749 sizeof(struct xstorm_def_status_block)/4);
a2fbb9ea
ET
4750}
4751
4752static void bnx2x_init_def_sb(struct bnx2x *bp,
4753 struct host_def_status_block *def_sb,
34f80b04 4754 dma_addr_t mapping, int sb_id)
a2fbb9ea 4755{
34f80b04
EG
4756 int port = BP_PORT(bp);
4757 int func = BP_FUNC(bp);
a2fbb9ea
ET
4758 int index, val, reg_offset;
4759 u64 section;
4760
4761 /* ATTN */
4762 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4763 atten_status_block);
34f80b04 4764 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4765
49d66772
ET
4766 bp->attn_state = 0;
4767
a2fbb9ea
ET
4768 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4769 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4770
34f80b04 4771 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4772 bp->attn_group[index].sig[0] = REG_RD(bp,
4773 reg_offset + 0x10*index);
4774 bp->attn_group[index].sig[1] = REG_RD(bp,
4775 reg_offset + 0x4 + 0x10*index);
4776 bp->attn_group[index].sig[2] = REG_RD(bp,
4777 reg_offset + 0x8 + 0x10*index);
4778 bp->attn_group[index].sig[3] = REG_RD(bp,
4779 reg_offset + 0xc + 0x10*index);
4780 }
4781
a2fbb9ea
ET
4782 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4783 HC_REG_ATTN_MSG0_ADDR_L);
4784
4785 REG_WR(bp, reg_offset, U64_LO(section));
4786 REG_WR(bp, reg_offset + 4, U64_HI(section));
4787
4788 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4789
4790 val = REG_RD(bp, reg_offset);
34f80b04 4791 val |= sb_id;
a2fbb9ea
ET
4792 REG_WR(bp, reg_offset, val);
4793
4794 /* USTORM */
4795 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4796 u_def_status_block);
34f80b04 4797 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4798
ca00392c
EG
4799 REG_WR(bp, BAR_CSTRORM_INTMEM +
4800 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4801 REG_WR(bp, BAR_CSTRORM_INTMEM +
4802 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 4803 U64_HI(section));
ca00392c
EG
4804 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4805 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
a2fbb9ea
ET
4806
4807 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
ca00392c
EG
4808 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4809 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
a2fbb9ea
ET
4810
4811 /* CSTORM */
4812 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4813 c_def_status_block);
34f80b04 4814 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4815
4816 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4817 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 4818 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4819 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 4820 U64_HI(section));
5c862848 4821 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 4822 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
a2fbb9ea
ET
4823
4824 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4825 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4826 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
a2fbb9ea
ET
4827
4828 /* TSTORM */
4829 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4830 t_def_status_block);
34f80b04 4831 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4832
4833 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4834 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4835 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4836 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4837 U64_HI(section));
5c862848 4838 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4839 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4840
4841 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4842 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4843 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4844
4845 /* XSTORM */
4846 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4847 x_def_status_block);
34f80b04 4848 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4849
4850 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4851 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4852 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4853 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4854 U64_HI(section));
5c862848 4855 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4856 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4857
4858 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4859 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4860 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4861
bb2a0f7a 4862 bp->stats_pending = 0;
66e855f3 4863 bp->set_mac_pending = 0;
bb2a0f7a 4864
34f80b04 4865 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4866}
4867
4868static void bnx2x_update_coalesce(struct bnx2x *bp)
4869{
34f80b04 4870 int port = BP_PORT(bp);
a2fbb9ea
ET
4871 int i;
4872
4873 for_each_queue(bp, i) {
34f80b04 4874 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4875
4876 /* HC_INDEX_U_ETH_RX_CQ_CONS */
ca00392c
EG
4877 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4878 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4879 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4880 bp->rx_ticks/12);
ca00392c
EG
4881 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4882 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4883 U_SB_ETH_RX_CQ_INDEX),
3799cf47 4884 (bp->rx_ticks/12) ? 0 : 1);
a2fbb9ea
ET
4885
4886 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4887 REG_WR8(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
4888 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4889 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4890 bp->tx_ticks/12);
a2fbb9ea 4891 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
4892 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4893 C_SB_ETH_TX_CQ_INDEX),
3799cf47 4894 (bp->tx_ticks/12) ? 0 : 1);
a2fbb9ea
ET
4895 }
4896}
4897
7a9b2557
VZ
4898static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4899 struct bnx2x_fastpath *fp, int last)
4900{
4901 int i;
4902
4903 for (i = 0; i < last; i++) {
4904 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4905 struct sk_buff *skb = rx_buf->skb;
4906
4907 if (skb == NULL) {
4908 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4909 continue;
4910 }
4911
4912 if (fp->tpa_state[i] == BNX2X_TPA_START)
4913 pci_unmap_single(bp->pdev,
4914 pci_unmap_addr(rx_buf, mapping),
356e2385 4915 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
4916
4917 dev_kfree_skb(skb);
4918 rx_buf->skb = NULL;
4919 }
4920}
4921
a2fbb9ea
ET
4922static void bnx2x_init_rx_rings(struct bnx2x *bp)
4923{
7a9b2557 4924 int func = BP_FUNC(bp);
32626230
EG
4925 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4926 ETH_MAX_AGGREGATION_QUEUES_E1H;
4927 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4928 int i, j;
a2fbb9ea 4929
87942b46 4930 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4931 DP(NETIF_MSG_IFUP,
4932 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4933
7a9b2557 4934 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4935
555f6c78 4936 for_each_rx_queue(bp, j) {
32626230 4937 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4938
32626230 4939 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4940 fp->tpa_pool[i].skb =
4941 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4942 if (!fp->tpa_pool[i].skb) {
4943 BNX2X_ERR("Failed to allocate TPA "
4944 "skb pool for queue[%d] - "
4945 "disabling TPA on this "
4946 "queue!\n", j);
4947 bnx2x_free_tpa_pool(bp, fp, i);
4948 fp->disable_tpa = 1;
4949 break;
4950 }
4951 pci_unmap_addr_set((struct sw_rx_bd *)
4952 &bp->fp->tpa_pool[i],
4953 mapping, 0);
4954 fp->tpa_state[i] = BNX2X_TPA_STOP;
4955 }
4956 }
4957 }
4958
555f6c78 4959 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
4960 struct bnx2x_fastpath *fp = &bp->fp[j];
4961
4962 fp->rx_bd_cons = 0;
4963 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4964 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4965
ca00392c
EG
4966 /* Mark queue as Rx */
4967 fp->is_rx_queue = 1;
4968
7a9b2557
VZ
4969 /* "next page" elements initialization */
4970 /* SGE ring */
4971 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4972 struct eth_rx_sge *sge;
4973
4974 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4975 sge->addr_hi =
4976 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4977 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4978 sge->addr_lo =
4979 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4980 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4981 }
4982
4983 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4984
7a9b2557 4985 /* RX BD ring */
a2fbb9ea
ET
4986 for (i = 1; i <= NUM_RX_RINGS; i++) {
4987 struct eth_rx_bd *rx_bd;
4988
4989 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4990 rx_bd->addr_hi =
4991 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4992 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4993 rx_bd->addr_lo =
4994 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4995 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4996 }
4997
34f80b04 4998 /* CQ ring */
a2fbb9ea
ET
4999 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5000 struct eth_rx_cqe_next_page *nextpg;
5001
5002 nextpg = (struct eth_rx_cqe_next_page *)
5003 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5004 nextpg->addr_hi =
5005 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 5006 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5007 nextpg->addr_lo =
5008 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 5009 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5010 }
5011
7a9b2557
VZ
5012 /* Allocate SGEs and initialize the ring elements */
5013 for (i = 0, ring_prod = 0;
5014 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 5015
7a9b2557
VZ
5016 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5017 BNX2X_ERR("was only able to allocate "
5018 "%d rx sges\n", i);
5019 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5020 /* Cleanup already allocated elements */
5021 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 5022 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
5023 fp->disable_tpa = 1;
5024 ring_prod = 0;
5025 break;
5026 }
5027 ring_prod = NEXT_SGE_IDX(ring_prod);
5028 }
5029 fp->rx_sge_prod = ring_prod;
5030
5031 /* Allocate BDs and initialize BD ring */
66e855f3 5032 fp->rx_comp_cons = 0;
7a9b2557 5033 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
5034 for (i = 0; i < bp->rx_ring_size; i++) {
5035 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5036 BNX2X_ERR("was only able to allocate "
de832a55
EG
5037 "%d rx skbs on queue[%d]\n", i, j);
5038 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
5039 break;
5040 }
5041 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5042 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5043 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
5044 }
5045
7a9b2557
VZ
5046 fp->rx_bd_prod = ring_prod;
5047 /* must not have more available CQEs than BDs */
5048 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5049 cqe_ring_prod);
a2fbb9ea
ET
5050 fp->rx_pkt = fp->rx_calls = 0;
5051
7a9b2557
VZ
5052 /* Warning!
5053 * this will generate an interrupt (to the TSTORM)
5054 * must only be done after chip is initialized
5055 */
5056 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5057 fp->rx_sge_prod);
a2fbb9ea
ET
5058 if (j != 0)
5059 continue;
5060
5061 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5062 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
5063 U64_LO(fp->rx_comp_mapping));
5064 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5065 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
5066 U64_HI(fp->rx_comp_mapping));
5067 }
5068}
5069
5070static void bnx2x_init_tx_ring(struct bnx2x *bp)
5071{
5072 int i, j;
5073
555f6c78 5074 for_each_tx_queue(bp, j) {
a2fbb9ea
ET
5075 struct bnx2x_fastpath *fp = &bp->fp[j];
5076
5077 for (i = 1; i <= NUM_TX_RINGS; i++) {
ca00392c
EG
5078 struct eth_tx_next_bd *tx_next_bd =
5079 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 5080
ca00392c 5081 tx_next_bd->addr_hi =
a2fbb9ea 5082 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 5083 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 5084 tx_next_bd->addr_lo =
a2fbb9ea 5085 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 5086 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
5087 }
5088
ca00392c
EG
5089 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5090 fp->tx_db.data.zero_fill1 = 0;
5091 fp->tx_db.data.prod = 0;
5092
a2fbb9ea
ET
5093 fp->tx_pkt_prod = 0;
5094 fp->tx_pkt_cons = 0;
5095 fp->tx_bd_prod = 0;
5096 fp->tx_bd_cons = 0;
5097 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5098 fp->tx_pkt = 0;
5099 }
6fe49bb9
EG
5100
5101 /* clean tx statistics */
5102 for_each_rx_queue(bp, i)
5103 bnx2x_fp(bp, i, tx_pkt) = 0;
a2fbb9ea
ET
5104}
5105
5106static void bnx2x_init_sp_ring(struct bnx2x *bp)
5107{
34f80b04 5108 int func = BP_FUNC(bp);
a2fbb9ea
ET
5109
5110 spin_lock_init(&bp->spq_lock);
5111
5112 bp->spq_left = MAX_SPQ_PENDING;
5113 bp->spq_prod_idx = 0;
a2fbb9ea
ET
5114 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5115 bp->spq_prod_bd = bp->spq;
5116 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5117
34f80b04 5118 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 5119 U64_LO(bp->spq_mapping));
34f80b04
EG
5120 REG_WR(bp,
5121 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
5122 U64_HI(bp->spq_mapping));
5123
34f80b04 5124 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
5125 bp->spq_prod_idx);
5126}
5127
5128static void bnx2x_init_context(struct bnx2x *bp)
5129{
5130 int i;
5131
ca00392c 5132 for_each_rx_queue(bp, i) {
a2fbb9ea
ET
5133 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5134 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 5135 u8 cl_id = fp->cl_id;
a2fbb9ea 5136
34f80b04
EG
5137 context->ustorm_st_context.common.sb_index_numbers =
5138 BNX2X_RX_SB_INDEX_NUM;
0626b899 5139 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 5140 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 5141 context->ustorm_st_context.common.flags =
de832a55
EG
5142 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5143 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5144 context->ustorm_st_context.common.statistics_counter_id =
5145 cl_id;
8d9c5f34 5146 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 5147 BNX2X_RX_ALIGN_SHIFT;
34f80b04 5148 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 5149 bp->rx_buf_size;
34f80b04 5150 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 5151 U64_HI(fp->rx_desc_mapping);
34f80b04 5152 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 5153 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
5154 if (!fp->disable_tpa) {
5155 context->ustorm_st_context.common.flags |=
ca00392c 5156 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 5157 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
5158 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5159 (u32)0xffff);
7a9b2557
VZ
5160 context->ustorm_st_context.common.sge_page_base_hi =
5161 U64_HI(fp->rx_sge_mapping);
5162 context->ustorm_st_context.common.sge_page_base_lo =
5163 U64_LO(fp->rx_sge_mapping);
ca00392c
EG
5164
5165 context->ustorm_st_context.common.max_sges_for_packet =
5166 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5167 context->ustorm_st_context.common.max_sges_for_packet =
5168 ((context->ustorm_st_context.common.
5169 max_sges_for_packet + PAGES_PER_SGE - 1) &
5170 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
7a9b2557
VZ
5171 }
5172
8d9c5f34
EG
5173 context->ustorm_ag_context.cdu_usage =
5174 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5175 CDU_REGION_NUMBER_UCM_AG,
5176 ETH_CONNECTION_TYPE);
5177
ca00392c
EG
5178 context->xstorm_ag_context.cdu_reserved =
5179 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5180 CDU_REGION_NUMBER_XCM_AG,
5181 ETH_CONNECTION_TYPE);
5182 }
5183
5184 for_each_tx_queue(bp, i) {
5185 struct bnx2x_fastpath *fp = &bp->fp[i];
5186 struct eth_context *context =
5187 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5188
5189 context->cstorm_st_context.sb_index_number =
5190 C_SB_ETH_TX_CQ_INDEX;
5191 context->cstorm_st_context.status_block_id = fp->sb_id;
5192
8d9c5f34
EG
5193 context->xstorm_st_context.tx_bd_page_base_hi =
5194 U64_HI(fp->tx_desc_mapping);
5195 context->xstorm_st_context.tx_bd_page_base_lo =
5196 U64_LO(fp->tx_desc_mapping);
ca00392c 5197 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 5198 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea
ET
5199 }
5200}
5201
5202static void bnx2x_init_ind_table(struct bnx2x *bp)
5203{
26c8fa4d 5204 int func = BP_FUNC(bp);
a2fbb9ea
ET
5205 int i;
5206
555f6c78 5207 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
5208 return;
5209
555f6c78
EG
5210 DP(NETIF_MSG_IFUP,
5211 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 5212 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 5213 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 5214 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
0626b899 5215 bp->fp->cl_id + (i % bp->num_rx_queues));
a2fbb9ea
ET
5216}
5217
49d66772
ET
5218static void bnx2x_set_client_config(struct bnx2x *bp)
5219{
49d66772 5220 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
5221 int port = BP_PORT(bp);
5222 int i;
49d66772 5223
e7799c5f 5224 tstorm_client.mtu = bp->dev->mtu;
49d66772 5225 tstorm_client.config_flags =
de832a55
EG
5226 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5227 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 5228#ifdef BCM_VLAN
0c6671b0 5229 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 5230 tstorm_client.config_flags |=
8d9c5f34 5231 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
5232 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5233 }
5234#endif
49d66772
ET
5235
5236 for_each_queue(bp, i) {
de832a55
EG
5237 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5238
49d66772 5239 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5240 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
5241 ((u32 *)&tstorm_client)[0]);
5242 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5243 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
5244 ((u32 *)&tstorm_client)[1]);
5245 }
5246
34f80b04
EG
5247 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5248 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
5249}
5250
a2fbb9ea
ET
5251static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5252{
a2fbb9ea 5253 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
5254 int mode = bp->rx_mode;
5255 int mask = (1 << BP_L_ID(bp));
5256 int func = BP_FUNC(bp);
581ce43d 5257 int port = BP_PORT(bp);
a2fbb9ea 5258 int i;
581ce43d
EG
5259 /* All but management unicast packets should pass to the host as well */
5260 u32 llh_mask =
5261 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5262 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5263 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5264 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 5265
3196a88a 5266 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
5267
5268 switch (mode) {
5269 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
5270 tstorm_mac_filter.ucast_drop_all = mask;
5271 tstorm_mac_filter.mcast_drop_all = mask;
5272 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 5273 break;
356e2385 5274
a2fbb9ea 5275 case BNX2X_RX_MODE_NORMAL:
34f80b04 5276 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5277 break;
356e2385 5278
a2fbb9ea 5279 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
5280 tstorm_mac_filter.mcast_accept_all = mask;
5281 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5282 break;
356e2385 5283
a2fbb9ea 5284 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
5285 tstorm_mac_filter.ucast_accept_all = mask;
5286 tstorm_mac_filter.mcast_accept_all = mask;
5287 tstorm_mac_filter.bcast_accept_all = mask;
581ce43d
EG
5288 /* pass management unicast packets as well */
5289 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 5290 break;
356e2385 5291
a2fbb9ea 5292 default:
34f80b04
EG
5293 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5294 break;
a2fbb9ea
ET
5295 }
5296
581ce43d
EG
5297 REG_WR(bp,
5298 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5299 llh_mask);
5300
a2fbb9ea
ET
5301 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5302 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5303 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
5304 ((u32 *)&tstorm_mac_filter)[i]);
5305
34f80b04 5306/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
5307 ((u32 *)&tstorm_mac_filter)[i]); */
5308 }
a2fbb9ea 5309
49d66772
ET
5310 if (mode != BNX2X_RX_MODE_NONE)
5311 bnx2x_set_client_config(bp);
a2fbb9ea
ET
5312}
5313
471de716
EG
5314static void bnx2x_init_internal_common(struct bnx2x *bp)
5315{
5316 int i;
5317
5318 /* Zero this manually as its initialization is
5319 currently missing in the initTool */
5320 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5321 REG_WR(bp, BAR_USTRORM_INTMEM +
5322 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5323}
5324
5325static void bnx2x_init_internal_port(struct bnx2x *bp)
5326{
5327 int port = BP_PORT(bp);
5328
ca00392c
EG
5329 REG_WR(bp,
5330 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5331 REG_WR(bp,
5332 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
471de716
EG
5333 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5334 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5335}
5336
5337static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 5338{
a2fbb9ea
ET
5339 struct tstorm_eth_function_common_config tstorm_config = {0};
5340 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
5341 int port = BP_PORT(bp);
5342 int func = BP_FUNC(bp);
de832a55
EG
5343 int i, j;
5344 u32 offset;
471de716 5345 u16 max_agg_size;
a2fbb9ea
ET
5346
5347 if (is_multi(bp)) {
555f6c78 5348 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
5349 tstorm_config.rss_result_mask = MULTI_MASK;
5350 }
ca00392c
EG
5351
5352 /* Enable TPA if needed */
5353 if (bp->flags & TPA_ENABLE_FLAG)
5354 tstorm_config.config_flags |=
5355 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5356
8d9c5f34
EG
5357 if (IS_E1HMF(bp))
5358 tstorm_config.config_flags |=
5359 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 5360
34f80b04
EG
5361 tstorm_config.leading_client_id = BP_L_ID(bp);
5362
a2fbb9ea 5363 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5364 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
5365 (*(u32 *)&tstorm_config));
5366
c14423fe 5367 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
5368 bnx2x_set_storm_rx_mode(bp);
5369
de832a55
EG
5370 for_each_queue(bp, i) {
5371 u8 cl_id = bp->fp[i].cl_id;
5372
5373 /* reset xstorm per client statistics */
5374 offset = BAR_XSTRORM_INTMEM +
5375 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5376 for (j = 0;
5377 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5378 REG_WR(bp, offset + j*4, 0);
5379
5380 /* reset tstorm per client statistics */
5381 offset = BAR_TSTRORM_INTMEM +
5382 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5383 for (j = 0;
5384 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5385 REG_WR(bp, offset + j*4, 0);
5386
5387 /* reset ustorm per client statistics */
5388 offset = BAR_USTRORM_INTMEM +
5389 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5390 for (j = 0;
5391 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5392 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
5393 }
5394
5395 /* Init statistics related context */
34f80b04 5396 stats_flags.collect_eth = 1;
a2fbb9ea 5397
66e855f3 5398 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5399 ((u32 *)&stats_flags)[0]);
66e855f3 5400 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5401 ((u32 *)&stats_flags)[1]);
5402
66e855f3 5403 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5404 ((u32 *)&stats_flags)[0]);
66e855f3 5405 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5406 ((u32 *)&stats_flags)[1]);
5407
de832a55
EG
5408 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5409 ((u32 *)&stats_flags)[0]);
5410 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5411 ((u32 *)&stats_flags)[1]);
5412
66e855f3 5413 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5414 ((u32 *)&stats_flags)[0]);
66e855f3 5415 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5416 ((u32 *)&stats_flags)[1]);
5417
66e855f3
YG
5418 REG_WR(bp, BAR_XSTRORM_INTMEM +
5419 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5420 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5421 REG_WR(bp, BAR_XSTRORM_INTMEM +
5422 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5423 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5424
5425 REG_WR(bp, BAR_TSTRORM_INTMEM +
5426 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5427 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5428 REG_WR(bp, BAR_TSTRORM_INTMEM +
5429 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5430 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5431
de832a55
EG
5432 REG_WR(bp, BAR_USTRORM_INTMEM +
5433 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5434 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5435 REG_WR(bp, BAR_USTRORM_INTMEM +
5436 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5437 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5438
34f80b04
EG
5439 if (CHIP_IS_E1H(bp)) {
5440 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5441 IS_E1HMF(bp));
5442 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5443 IS_E1HMF(bp));
5444 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5445 IS_E1HMF(bp));
5446 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5447 IS_E1HMF(bp));
5448
7a9b2557
VZ
5449 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5450 bp->e1hov);
34f80b04
EG
5451 }
5452
4f40f2cb
EG
5453 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5454 max_agg_size =
5455 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5456 SGE_PAGE_SIZE * PAGES_PER_SGE),
5457 (u32)0xffff);
555f6c78 5458 for_each_rx_queue(bp, i) {
7a9b2557 5459 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5460
5461 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5462 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5463 U64_LO(fp->rx_comp_mapping));
5464 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5465 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5466 U64_HI(fp->rx_comp_mapping));
5467
ca00392c
EG
5468 /* Next page */
5469 REG_WR(bp, BAR_USTRORM_INTMEM +
5470 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5471 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5472 REG_WR(bp, BAR_USTRORM_INTMEM +
5473 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5474 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5475
7a9b2557 5476 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5477 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5478 max_agg_size);
5479 }
8a1c38d1 5480
1c06328c
EG
5481 /* dropless flow control */
5482 if (CHIP_IS_E1H(bp)) {
5483 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5484
5485 rx_pause.bd_thr_low = 250;
5486 rx_pause.cqe_thr_low = 250;
5487 rx_pause.cos = 1;
5488 rx_pause.sge_thr_low = 0;
5489 rx_pause.bd_thr_high = 350;
5490 rx_pause.cqe_thr_high = 350;
5491 rx_pause.sge_thr_high = 0;
5492
5493 for_each_rx_queue(bp, i) {
5494 struct bnx2x_fastpath *fp = &bp->fp[i];
5495
5496 if (!fp->disable_tpa) {
5497 rx_pause.sge_thr_low = 150;
5498 rx_pause.sge_thr_high = 250;
5499 }
5500
5501
5502 offset = BAR_USTRORM_INTMEM +
5503 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5504 fp->cl_id);
5505 for (j = 0;
5506 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5507 j++)
5508 REG_WR(bp, offset + j*4,
5509 ((u32 *)&rx_pause)[j]);
5510 }
5511 }
5512
8a1c38d1
EG
5513 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5514
5515 /* Init rate shaping and fairness contexts */
5516 if (IS_E1HMF(bp)) {
5517 int vn;
5518
5519 /* During init there is no active link
5520 Until link is up, set link rate to 10Gbps */
5521 bp->link_vars.line_speed = SPEED_10000;
5522 bnx2x_init_port_minmax(bp);
5523
5524 bnx2x_calc_vn_weight_sum(bp);
5525
5526 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5527 bnx2x_init_vn_minmax(bp, 2*vn + port);
5528
5529 /* Enable rate shaping and fairness */
5530 bp->cmng.flags.cmng_enables =
5531 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5532 if (bp->vn_weight_sum)
5533 bp->cmng.flags.cmng_enables |=
5534 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5535 else
5536 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5537 " fairness will be disabled\n");
5538 } else {
5539 /* rate shaping and fairness are disabled */
5540 DP(NETIF_MSG_IFUP,
5541 "single function mode minmax will be disabled\n");
5542 }
5543
5544
5545 /* Store it to internal memory */
5546 if (bp->port.pmf)
5547 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5548 REG_WR(bp, BAR_XSTRORM_INTMEM +
5549 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5550 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5551}
5552
471de716
EG
5553static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5554{
5555 switch (load_code) {
5556 case FW_MSG_CODE_DRV_LOAD_COMMON:
5557 bnx2x_init_internal_common(bp);
5558 /* no break */
5559
5560 case FW_MSG_CODE_DRV_LOAD_PORT:
5561 bnx2x_init_internal_port(bp);
5562 /* no break */
5563
5564 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5565 bnx2x_init_internal_func(bp);
5566 break;
5567
5568 default:
5569 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5570 break;
5571 }
5572}
5573
5574static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5575{
5576 int i;
5577
5578 for_each_queue(bp, i) {
5579 struct bnx2x_fastpath *fp = &bp->fp[i];
5580
34f80b04 5581 fp->bp = bp;
a2fbb9ea 5582 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5583 fp->index = i;
34f80b04
EG
5584 fp->cl_id = BP_L_ID(bp) + i;
5585 fp->sb_id = fp->cl_id;
ca00392c
EG
5586 /* Suitable Rx and Tx SBs are served by the same client */
5587 if (i >= bp->num_rx_queues)
5588 fp->cl_id -= bp->num_rx_queues;
34f80b04 5589 DP(NETIF_MSG_IFUP,
f5372251
EG
5590 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5591 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5592 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5593 fp->sb_id);
5c862848 5594 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5595 }
5596
16119785
EG
5597 /* ensure status block indices were read */
5598 rmb();
5599
5600
5c862848
EG
5601 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5602 DEF_SB_ID);
5603 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5604 bnx2x_update_coalesce(bp);
5605 bnx2x_init_rx_rings(bp);
5606 bnx2x_init_tx_ring(bp);
5607 bnx2x_init_sp_ring(bp);
5608 bnx2x_init_context(bp);
471de716 5609 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5610 bnx2x_init_ind_table(bp);
0ef00459
EG
5611 bnx2x_stats_init(bp);
5612
5613 /* At this point, we are ready for interrupts */
5614 atomic_set(&bp->intr_sem, 0);
5615
5616 /* flush all before enabling interrupts */
5617 mb();
5618 mmiowb();
5619
615f8fd9 5620 bnx2x_int_enable(bp);
eb8da205
EG
5621
5622 /* Check for SPIO5 */
5623 bnx2x_attn_int_deasserted0(bp,
5624 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5625 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
5626}
5627
5628/* end of nic init */
5629
5630/*
5631 * gzip service functions
5632 */
5633
5634static int bnx2x_gunzip_init(struct bnx2x *bp)
5635{
5636 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5637 &bp->gunzip_mapping);
5638 if (bp->gunzip_buf == NULL)
5639 goto gunzip_nomem1;
5640
5641 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5642 if (bp->strm == NULL)
5643 goto gunzip_nomem2;
5644
5645 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5646 GFP_KERNEL);
5647 if (bp->strm->workspace == NULL)
5648 goto gunzip_nomem3;
5649
5650 return 0;
5651
5652gunzip_nomem3:
5653 kfree(bp->strm);
5654 bp->strm = NULL;
5655
5656gunzip_nomem2:
5657 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5658 bp->gunzip_mapping);
5659 bp->gunzip_buf = NULL;
5660
5661gunzip_nomem1:
5662 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5663 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
5664 return -ENOMEM;
5665}
5666
5667static void bnx2x_gunzip_end(struct bnx2x *bp)
5668{
5669 kfree(bp->strm->workspace);
5670
5671 kfree(bp->strm);
5672 bp->strm = NULL;
5673
5674 if (bp->gunzip_buf) {
5675 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5676 bp->gunzip_mapping);
5677 bp->gunzip_buf = NULL;
5678 }
5679}
5680
94a78b79 5681static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
5682{
5683 int n, rc;
5684
5685 /* check gzip header */
94a78b79
VZ
5686 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5687 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 5688 return -EINVAL;
94a78b79 5689 }
a2fbb9ea
ET
5690
5691 n = 10;
5692
34f80b04 5693#define FNAME 0x8
a2fbb9ea
ET
5694
5695 if (zbuf[3] & FNAME)
5696 while ((zbuf[n++] != 0) && (n < len));
5697
94a78b79 5698 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
5699 bp->strm->avail_in = len - n;
5700 bp->strm->next_out = bp->gunzip_buf;
5701 bp->strm->avail_out = FW_BUF_SIZE;
5702
5703 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5704 if (rc != Z_OK)
5705 return rc;
5706
5707 rc = zlib_inflate(bp->strm, Z_FINISH);
5708 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5709 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5710 bp->dev->name, bp->strm->msg);
5711
5712 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5713 if (bp->gunzip_outlen & 0x3)
5714 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5715 " gunzip_outlen (%d) not aligned\n",
5716 bp->dev->name, bp->gunzip_outlen);
5717 bp->gunzip_outlen >>= 2;
5718
5719 zlib_inflateEnd(bp->strm);
5720
5721 if (rc == Z_STREAM_END)
5722 return 0;
5723
5724 return rc;
5725}
5726
5727/* nic load/unload */
5728
5729/*
34f80b04 5730 * General service functions
a2fbb9ea
ET
5731 */
5732
5733/* send a NIG loopback debug packet */
5734static void bnx2x_lb_pckt(struct bnx2x *bp)
5735{
a2fbb9ea 5736 u32 wb_write[3];
a2fbb9ea
ET
5737
5738 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5739 wb_write[0] = 0x55555555;
5740 wb_write[1] = 0x55555555;
34f80b04 5741 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5742 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5743
5744 /* NON-IP protocol */
a2fbb9ea
ET
5745 wb_write[0] = 0x09000000;
5746 wb_write[1] = 0x55555555;
34f80b04 5747 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5748 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5749}
5750
5751/* some of the internal memories
5752 * are not directly readable from the driver
5753 * to test them we send debug packets
5754 */
5755static int bnx2x_int_mem_test(struct bnx2x *bp)
5756{
5757 int factor;
5758 int count, i;
5759 u32 val = 0;
5760
ad8d3948 5761 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5762 factor = 120;
ad8d3948
EG
5763 else if (CHIP_REV_IS_EMUL(bp))
5764 factor = 200;
5765 else
a2fbb9ea 5766 factor = 1;
a2fbb9ea
ET
5767
5768 DP(NETIF_MSG_HW, "start part1\n");
5769
5770 /* Disable inputs of parser neighbor blocks */
5771 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5772 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5773 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5774 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5775
5776 /* Write 0 to parser credits for CFC search request */
5777 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5778
5779 /* send Ethernet packet */
5780 bnx2x_lb_pckt(bp);
5781
5782 /* TODO do i reset NIG statistic? */
5783 /* Wait until NIG register shows 1 packet of size 0x10 */
5784 count = 1000 * factor;
5785 while (count) {
34f80b04 5786
a2fbb9ea
ET
5787 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5788 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5789 if (val == 0x10)
5790 break;
5791
5792 msleep(10);
5793 count--;
5794 }
5795 if (val != 0x10) {
5796 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5797 return -1;
5798 }
5799
5800 /* Wait until PRS register shows 1 packet */
5801 count = 1000 * factor;
5802 while (count) {
5803 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5804 if (val == 1)
5805 break;
5806
5807 msleep(10);
5808 count--;
5809 }
5810 if (val != 0x1) {
5811 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5812 return -2;
5813 }
5814
5815 /* Reset and init BRB, PRS */
34f80b04 5816 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5817 msleep(50);
34f80b04 5818 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5819 msleep(50);
94a78b79
VZ
5820 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5821 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5822
5823 DP(NETIF_MSG_HW, "part2\n");
5824
5825 /* Disable inputs of parser neighbor blocks */
5826 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5827 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5828 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5829 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5830
5831 /* Write 0 to parser credits for CFC search request */
5832 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5833
5834 /* send 10 Ethernet packets */
5835 for (i = 0; i < 10; i++)
5836 bnx2x_lb_pckt(bp);
5837
5838 /* Wait until NIG register shows 10 + 1
5839 packets of size 11*0x10 = 0xb0 */
5840 count = 1000 * factor;
5841 while (count) {
34f80b04 5842
a2fbb9ea
ET
5843 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5844 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5845 if (val == 0xb0)
5846 break;
5847
5848 msleep(10);
5849 count--;
5850 }
5851 if (val != 0xb0) {
5852 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5853 return -3;
5854 }
5855
5856 /* Wait until PRS register shows 2 packets */
5857 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5858 if (val != 2)
5859 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5860
5861 /* Write 1 to parser credits for CFC search request */
5862 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5863
5864 /* Wait until PRS register shows 3 packets */
5865 msleep(10 * factor);
5866 /* Wait until NIG register shows 1 packet of size 0x10 */
5867 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5868 if (val != 3)
5869 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5870
5871 /* clear NIG EOP FIFO */
5872 for (i = 0; i < 11; i++)
5873 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5874 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5875 if (val != 1) {
5876 BNX2X_ERR("clear of NIG failed\n");
5877 return -4;
5878 }
5879
5880 /* Reset and init BRB, PRS, NIG */
5881 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5882 msleep(50);
5883 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5884 msleep(50);
94a78b79
VZ
5885 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5886 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5887#ifndef BCM_ISCSI
5888 /* set NIC mode */
5889 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5890#endif
5891
5892 /* Enable inputs of parser neighbor blocks */
5893 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5894 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5895 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5896 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5897
5898 DP(NETIF_MSG_HW, "done\n");
5899
5900 return 0; /* OK */
5901}
5902
5903static void enable_blocks_attention(struct bnx2x *bp)
5904{
5905 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5906 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5907 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5908 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5909 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5910 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5911 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5912 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5913 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5914/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5915/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5916 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5917 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5918 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5919/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5920/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5921 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5922 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5923 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5924 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5925/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5926/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5927 if (CHIP_REV_IS_FPGA(bp))
5928 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5929 else
5930 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5931 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5932 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5933 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5934/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5935/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5936 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5937 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5938/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5939 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5940}
5941
34f80b04 5942
81f75bbf
EG
5943static void bnx2x_reset_common(struct bnx2x *bp)
5944{
5945 /* reset_common */
5946 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5947 0xd3ffff7f);
5948 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5949}
5950
573f2035
EG
5951static void bnx2x_init_pxp(struct bnx2x *bp)
5952{
5953 u16 devctl;
5954 int r_order, w_order;
5955
5956 pci_read_config_word(bp->pdev,
5957 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5958 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5959 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5960 if (bp->mrrs == -1)
5961 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5962 else {
5963 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5964 r_order = bp->mrrs;
5965 }
5966
5967 bnx2x_init_pxp_arb(bp, r_order, w_order);
5968}
fd4ef40d
EG
5969
5970static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5971{
5972 u32 val;
5973 u8 port;
5974 u8 is_required = 0;
5975
5976 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5977 SHARED_HW_CFG_FAN_FAILURE_MASK;
5978
5979 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5980 is_required = 1;
5981
5982 /*
5983 * The fan failure mechanism is usually related to the PHY type since
5984 * the power consumption of the board is affected by the PHY. Currently,
5985 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5986 */
5987 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5988 for (port = PORT_0; port < PORT_MAX; port++) {
5989 u32 phy_type =
5990 SHMEM_RD(bp, dev_info.port_hw_config[port].
5991 external_phy_config) &
5992 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5993 is_required |=
5994 ((phy_type ==
5995 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
5996 (phy_type ==
5997 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
5998 (phy_type ==
5999 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6000 }
6001
6002 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6003
6004 if (is_required == 0)
6005 return;
6006
6007 /* Fan failure is indicated by SPIO 5 */
6008 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6009 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6010
6011 /* set to active low mode */
6012 val = REG_RD(bp, MISC_REG_SPIO_INT);
6013 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6014 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6015 REG_WR(bp, MISC_REG_SPIO_INT, val);
6016
6017 /* enable interrupt to signal the IGU */
6018 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6019 val |= (1 << MISC_REGISTERS_SPIO_5);
6020 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6021}
6022
34f80b04 6023static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 6024{
a2fbb9ea 6025 u32 val, i;
a2fbb9ea 6026
34f80b04 6027 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 6028
81f75bbf 6029 bnx2x_reset_common(bp);
34f80b04
EG
6030 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6031 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 6032
94a78b79 6033 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
6034 if (CHIP_IS_E1H(bp))
6035 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 6036
34f80b04
EG
6037 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6038 msleep(30);
6039 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 6040
94a78b79 6041 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
6042 if (CHIP_IS_E1(bp)) {
6043 /* enable HW interrupt from PXP on USDM overflow
6044 bit 16 on INT_MASK_0 */
6045 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6046 }
a2fbb9ea 6047
94a78b79 6048 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 6049 bnx2x_init_pxp(bp);
a2fbb9ea
ET
6050
6051#ifdef __BIG_ENDIAN
34f80b04
EG
6052 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6053 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6054 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6055 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6056 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
6057 /* make sure this value is 0 */
6058 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
6059
6060/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6061 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6062 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6063 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6064 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6065#endif
6066
34f80b04 6067 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 6068#ifdef BCM_ISCSI
34f80b04
EG
6069 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6070 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6071 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
6072#endif
6073
34f80b04
EG
6074 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6075 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6076
34f80b04
EG
6077 /* let the HW do it's magic ... */
6078 msleep(100);
6079 /* finish PXP init */
6080 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6081 if (val != 1) {
6082 BNX2X_ERR("PXP2 CFG failed\n");
6083 return -EBUSY;
6084 }
6085 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6086 if (val != 1) {
6087 BNX2X_ERR("PXP2 RD_INIT failed\n");
6088 return -EBUSY;
6089 }
a2fbb9ea 6090
34f80b04
EG
6091 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6092 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6093
94a78b79 6094 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6095
34f80b04
EG
6096 /* clean the DMAE memory */
6097 bp->dmae_ready = 1;
6098 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6099
94a78b79
VZ
6100 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6101 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6102 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6103 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6104
34f80b04
EG
6105 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6106 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6107 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6108 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6109
94a78b79 6110 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
34f80b04
EG
6111 /* soft reset pulse */
6112 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6113 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
6114
6115#ifdef BCM_ISCSI
94a78b79 6116 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6117#endif
a2fbb9ea 6118
94a78b79 6119 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
6120 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6121 if (!CHIP_REV_IS_SLOW(bp)) {
6122 /* enable hw interrupt from doorbell Q */
6123 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6124 }
a2fbb9ea 6125
94a78b79
VZ
6126 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6127 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6128 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
6129 /* set NIC mode */
6130 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
6131 if (CHIP_IS_E1H(bp))
6132 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6133
94a78b79
VZ
6134 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6135 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6136 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6137 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6138
ca00392c
EG
6139 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6140 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6141 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6142 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6143
94a78b79
VZ
6144 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6145 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6146 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6147 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6148
34f80b04
EG
6149 /* sync semi rtc */
6150 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6151 0x80000000);
6152 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6153 0x80000000);
a2fbb9ea 6154
94a78b79
VZ
6155 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6156 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6157 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6158
34f80b04
EG
6159 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6160 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6161 REG_WR(bp, i, 0xc0cac01a);
6162 /* TODO: replace with something meaningful */
6163 }
94a78b79 6164 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
34f80b04 6165 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6166
34f80b04
EG
6167 if (sizeof(union cdu_context) != 1024)
6168 /* we currently assume that a context is 1024 bytes */
6169 printk(KERN_ALERT PFX "please adjust the size of"
6170 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 6171
94a78b79 6172 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
6173 val = (4 << 24) + (0 << 12) + 1024;
6174 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 6175
94a78b79 6176 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 6177 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
6178 /* enable context validation interrupt from CFC */
6179 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6180
6181 /* set the thresholds to prevent CFC/CDU race */
6182 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 6183
94a78b79
VZ
6184 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6185 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 6186
94a78b79 6187 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
6188 /* Reset PCIE errors for debug */
6189 REG_WR(bp, 0x2814, 0xffffffff);
6190 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 6191
94a78b79 6192 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 6193 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 6194 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 6195 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 6196
94a78b79 6197 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
6198 if (CHIP_IS_E1H(bp)) {
6199 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6200 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6201 }
6202
6203 if (CHIP_REV_IS_SLOW(bp))
6204 msleep(200);
6205
6206 /* finish CFC init */
6207 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6208 if (val != 1) {
6209 BNX2X_ERR("CFC LL_INIT failed\n");
6210 return -EBUSY;
6211 }
6212 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6213 if (val != 1) {
6214 BNX2X_ERR("CFC AC_INIT failed\n");
6215 return -EBUSY;
6216 }
6217 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6218 if (val != 1) {
6219 BNX2X_ERR("CFC CAM_INIT failed\n");
6220 return -EBUSY;
6221 }
6222 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 6223
34f80b04
EG
6224 /* read NIG statistic
6225 to see if this is our first up since powerup */
6226 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6227 val = *bnx2x_sp(bp, wb_data[0]);
6228
6229 /* do internal memory self test */
6230 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6231 BNX2X_ERR("internal mem self test failed\n");
6232 return -EBUSY;
6233 }
6234
35b19ba5 6235 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
6236 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6237 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6238 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 6239 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
6240 bp->port.need_hw_lock = 1;
6241 break;
6242
34f80b04
EG
6243 default:
6244 break;
6245 }
f1410647 6246
fd4ef40d
EG
6247 bnx2x_setup_fan_failure_detection(bp);
6248
34f80b04
EG
6249 /* clear PXP2 attentions */
6250 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6251
34f80b04 6252 enable_blocks_attention(bp);
a2fbb9ea 6253
6bbca910
YR
6254 if (!BP_NOMCP(bp)) {
6255 bnx2x_acquire_phy_lock(bp);
6256 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6257 bnx2x_release_phy_lock(bp);
6258 } else
6259 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6260
34f80b04
EG
6261 return 0;
6262}
a2fbb9ea 6263
34f80b04
EG
6264static int bnx2x_init_port(struct bnx2x *bp)
6265{
6266 int port = BP_PORT(bp);
94a78b79 6267 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 6268 u32 low, high;
34f80b04 6269 u32 val;
a2fbb9ea 6270
34f80b04
EG
6271 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6272
6273 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 6274
94a78b79 6275 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 6276 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
6277
6278 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6279 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6280 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
a2fbb9ea
ET
6281#ifdef BCM_ISCSI
6282 /* Port0 1
6283 * Port1 385 */
6284 i++;
6285 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6286 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6287 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6288 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6289
6290 /* Port0 2
6291 * Port1 386 */
6292 i++;
6293 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6294 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6295 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6296 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6297
6298 /* Port0 3
6299 * Port1 387 */
6300 i++;
6301 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6302 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6303 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6304 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6305#endif
94a78b79 6306 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 6307
a2fbb9ea
ET
6308#ifdef BCM_ISCSI
6309 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6310 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6311
94a78b79 6312 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
a2fbb9ea 6313#endif
94a78b79 6314 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 6315
94a78b79 6316 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
6317 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6318 /* no pause for emulation and FPGA */
6319 low = 0;
6320 high = 513;
6321 } else {
6322 if (IS_E1HMF(bp))
6323 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6324 else if (bp->dev->mtu > 4096) {
6325 if (bp->flags & ONE_PORT_FLAG)
6326 low = 160;
6327 else {
6328 val = bp->dev->mtu;
6329 /* (24*1024 + val*4)/256 */
6330 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6331 }
6332 } else
6333 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6334 high = low + 56; /* 14*1024/256 */
6335 }
6336 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6337 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6338
6339
94a78b79 6340 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 6341
94a78b79 6342 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 6343 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 6344 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 6345 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 6346
94a78b79
VZ
6347 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6348 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6349 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6350 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 6351
94a78b79 6352 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 6353 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 6354
94a78b79 6355 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
6356
6357 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 6358 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
6359
6360 /* update threshold */
34f80b04 6361 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 6362 /* update init credit */
34f80b04 6363 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
6364
6365 /* probe changes */
34f80b04 6366 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 6367 msleep(5);
34f80b04 6368 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
6369
6370#ifdef BCM_ISCSI
6371 /* tell the searcher where the T2 table is */
6372 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6373
6374 wb_write[0] = U64_LO(bp->t2_mapping);
6375 wb_write[1] = U64_HI(bp->t2_mapping);
6376 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6377 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6378 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6379 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6380
6381 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
a2fbb9ea 6382#endif
94a78b79 6383 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 6384 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
6385
6386 if (CHIP_IS_E1(bp)) {
6387 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6388 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6389 }
94a78b79 6390 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6391
94a78b79 6392 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6393 /* init aeu_mask_attn_func_0/1:
6394 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6395 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6396 * bits 4-7 are used for "per vn group attention" */
6397 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6398 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6399
94a78b79 6400 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 6401 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 6402 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 6403 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 6404 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6405
94a78b79 6406 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6407
6408 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6409
6410 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6411 /* 0x2 disable e1hov, 0x1 enable */
6412 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6413 (IS_E1HMF(bp) ? 0x1 : 0x2));
6414
1c06328c
EG
6415 {
6416 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6417 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6418 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6419 }
34f80b04
EG
6420 }
6421
94a78b79 6422 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 6423 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6424
35b19ba5 6425 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6426 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6427 {
6428 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6429
6430 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6431 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6432
6433 /* The GPIO should be swapped if the swap register is
6434 set and active */
6435 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6436 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6437
6438 /* Select function upon port-swap configuration */
6439 if (port == 0) {
6440 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6441 aeu_gpio_mask = (swap_val && swap_override) ?
6442 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6443 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6444 } else {
6445 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6446 aeu_gpio_mask = (swap_val && swap_override) ?
6447 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6448 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6449 }
6450 val = REG_RD(bp, offset);
6451 /* add GPIO3 to group */
6452 val |= aeu_gpio_mask;
6453 REG_WR(bp, offset, val);
6454 }
6455 break;
6456
35b19ba5 6457 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6458 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6459 /* add SPIO 5 to group 0 */
4d295db0
EG
6460 {
6461 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6462 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6463 val = REG_RD(bp, reg_addr);
f1410647 6464 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6465 REG_WR(bp, reg_addr, val);
6466 }
f1410647
ET
6467 break;
6468
6469 default:
6470 break;
6471 }
6472
c18487ee 6473 bnx2x__link_reset(bp);
a2fbb9ea 6474
34f80b04
EG
6475 return 0;
6476}
6477
6478#define ILT_PER_FUNC (768/2)
6479#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6480/* the phys address is shifted right 12 bits and has an added
6481 1=valid bit added to the 53rd bit
6482 then since this is a wide register(TM)
6483 we split it into two 32 bit writes
6484 */
6485#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6486#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6487#define PXP_ONE_ILT(x) (((x) << 10) | x)
6488#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6489
6490#define CNIC_ILT_LINES 0
6491
6492static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6493{
6494 int reg;
6495
6496 if (CHIP_IS_E1H(bp))
6497 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6498 else /* E1 */
6499 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6500
6501 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6502}
6503
6504static int bnx2x_init_func(struct bnx2x *bp)
6505{
6506 int port = BP_PORT(bp);
6507 int func = BP_FUNC(bp);
8badd27a 6508 u32 addr, val;
34f80b04
EG
6509 int i;
6510
6511 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6512
8badd27a
EG
6513 /* set MSI reconfigure capability */
6514 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6515 val = REG_RD(bp, addr);
6516 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6517 REG_WR(bp, addr, val);
6518
34f80b04
EG
6519 i = FUNC_ILT_BASE(func);
6520
6521 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6522 if (CHIP_IS_E1H(bp)) {
6523 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6524 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6525 } else /* E1 */
6526 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6527 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6528
6529
6530 if (CHIP_IS_E1H(bp)) {
573f2035
EG
6531 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6532 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6533 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6534 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6535 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6536 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6537 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6538 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6539 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
6540
6541 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6542 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6543 }
6544
6545 /* HC init per function */
6546 if (CHIP_IS_E1H(bp)) {
6547 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6548
6549 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6550 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6551 }
94a78b79 6552 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 6553
c14423fe 6554 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6555 REG_WR(bp, 0x2114, 0xffffffff);
6556 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6557
34f80b04
EG
6558 return 0;
6559}
6560
6561static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6562{
6563 int i, rc = 0;
a2fbb9ea 6564
34f80b04
EG
6565 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6566 BP_FUNC(bp), load_code);
a2fbb9ea 6567
34f80b04
EG
6568 bp->dmae_ready = 0;
6569 mutex_init(&bp->dmae_mutex);
54016b26
EG
6570 rc = bnx2x_gunzip_init(bp);
6571 if (rc)
6572 return rc;
a2fbb9ea 6573
34f80b04
EG
6574 switch (load_code) {
6575 case FW_MSG_CODE_DRV_LOAD_COMMON:
6576 rc = bnx2x_init_common(bp);
6577 if (rc)
6578 goto init_hw_err;
6579 /* no break */
6580
6581 case FW_MSG_CODE_DRV_LOAD_PORT:
6582 bp->dmae_ready = 1;
6583 rc = bnx2x_init_port(bp);
6584 if (rc)
6585 goto init_hw_err;
6586 /* no break */
6587
6588 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6589 bp->dmae_ready = 1;
6590 rc = bnx2x_init_func(bp);
6591 if (rc)
6592 goto init_hw_err;
6593 break;
6594
6595 default:
6596 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6597 break;
6598 }
6599
6600 if (!BP_NOMCP(bp)) {
6601 int func = BP_FUNC(bp);
a2fbb9ea
ET
6602
6603 bp->fw_drv_pulse_wr_seq =
34f80b04 6604 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6605 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
6606 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6607 }
a2fbb9ea 6608
34f80b04
EG
6609 /* this needs to be done before gunzip end */
6610 bnx2x_zero_def_sb(bp);
6611 for_each_queue(bp, i)
6612 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6613
6614init_hw_err:
6615 bnx2x_gunzip_end(bp);
6616
6617 return rc;
a2fbb9ea
ET
6618}
6619
a2fbb9ea
ET
6620static void bnx2x_free_mem(struct bnx2x *bp)
6621{
6622
6623#define BNX2X_PCI_FREE(x, y, size) \
6624 do { \
6625 if (x) { \
6626 pci_free_consistent(bp->pdev, size, x, y); \
6627 x = NULL; \
6628 y = 0; \
6629 } \
6630 } while (0)
6631
6632#define BNX2X_FREE(x) \
6633 do { \
6634 if (x) { \
6635 vfree(x); \
6636 x = NULL; \
6637 } \
6638 } while (0)
6639
6640 int i;
6641
6642 /* fastpath */
555f6c78 6643 /* Common */
a2fbb9ea
ET
6644 for_each_queue(bp, i) {
6645
555f6c78 6646 /* status blocks */
a2fbb9ea
ET
6647 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6648 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6649 sizeof(struct host_status_block));
555f6c78
EG
6650 }
6651 /* Rx */
6652 for_each_rx_queue(bp, i) {
a2fbb9ea 6653
555f6c78 6654 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6655 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6656 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6657 bnx2x_fp(bp, i, rx_desc_mapping),
6658 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6659
6660 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6661 bnx2x_fp(bp, i, rx_comp_mapping),
6662 sizeof(struct eth_fast_path_rx_cqe) *
6663 NUM_RCQ_BD);
a2fbb9ea 6664
7a9b2557 6665 /* SGE ring */
32626230 6666 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6667 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6668 bnx2x_fp(bp, i, rx_sge_mapping),
6669 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6670 }
555f6c78
EG
6671 /* Tx */
6672 for_each_tx_queue(bp, i) {
6673
6674 /* fastpath tx rings: tx_buf tx_desc */
6675 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6676 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6677 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6678 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6679 }
a2fbb9ea
ET
6680 /* end of fastpath */
6681
6682 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6683 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6684
6685 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6686 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
6687
6688#ifdef BCM_ISCSI
6689 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6690 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6691 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6692 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6693#endif
7a9b2557 6694 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6695
6696#undef BNX2X_PCI_FREE
6697#undef BNX2X_KFREE
6698}
6699
6700static int bnx2x_alloc_mem(struct bnx2x *bp)
6701{
6702
6703#define BNX2X_PCI_ALLOC(x, y, size) \
6704 do { \
6705 x = pci_alloc_consistent(bp->pdev, size, y); \
6706 if (x == NULL) \
6707 goto alloc_mem_err; \
6708 memset(x, 0, size); \
6709 } while (0)
6710
6711#define BNX2X_ALLOC(x, size) \
6712 do { \
6713 x = vmalloc(size); \
6714 if (x == NULL) \
6715 goto alloc_mem_err; \
6716 memset(x, 0, size); \
6717 } while (0)
6718
6719 int i;
6720
6721 /* fastpath */
555f6c78 6722 /* Common */
a2fbb9ea
ET
6723 for_each_queue(bp, i) {
6724 bnx2x_fp(bp, i, bp) = bp;
6725
555f6c78 6726 /* status blocks */
a2fbb9ea
ET
6727 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6728 &bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6729 sizeof(struct host_status_block));
555f6c78
EG
6730 }
6731 /* Rx */
6732 for_each_rx_queue(bp, i) {
a2fbb9ea 6733
555f6c78 6734 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6735 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6736 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6737 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6738 &bnx2x_fp(bp, i, rx_desc_mapping),
6739 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6740
6741 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6742 &bnx2x_fp(bp, i, rx_comp_mapping),
6743 sizeof(struct eth_fast_path_rx_cqe) *
6744 NUM_RCQ_BD);
6745
7a9b2557
VZ
6746 /* SGE ring */
6747 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6748 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6749 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6750 &bnx2x_fp(bp, i, rx_sge_mapping),
6751 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6752 }
555f6c78
EG
6753 /* Tx */
6754 for_each_tx_queue(bp, i) {
6755
555f6c78
EG
6756 /* fastpath tx rings: tx_buf tx_desc */
6757 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6758 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6759 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6760 &bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6761 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6762 }
a2fbb9ea
ET
6763 /* end of fastpath */
6764
6765 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6766 sizeof(struct host_def_status_block));
6767
6768 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6769 sizeof(struct bnx2x_slowpath));
6770
6771#ifdef BCM_ISCSI
6772 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6773
6774 /* Initialize T1 */
6775 for (i = 0; i < 64*1024; i += 64) {
6776 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6777 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6778 }
6779
6780 /* allocate searcher T2 table
6781 we allocate 1/4 of alloc num for T2
6782 (which is not entered into the ILT) */
6783 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6784
6785 /* Initialize T2 */
6786 for (i = 0; i < 16*1024; i += 64)
6787 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6788
c14423fe 6789 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
6790 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6791
6792 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6793 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6794
6795 /* QM queues (128*MAX_CONN) */
6796 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6797#endif
6798
6799 /* Slow path ring */
6800 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6801
6802 return 0;
6803
6804alloc_mem_err:
6805 bnx2x_free_mem(bp);
6806 return -ENOMEM;
6807
6808#undef BNX2X_PCI_ALLOC
6809#undef BNX2X_ALLOC
6810}
6811
6812static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6813{
6814 int i;
6815
555f6c78 6816 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6817 struct bnx2x_fastpath *fp = &bp->fp[i];
6818
6819 u16 bd_cons = fp->tx_bd_cons;
6820 u16 sw_prod = fp->tx_pkt_prod;
6821 u16 sw_cons = fp->tx_pkt_cons;
6822
a2fbb9ea
ET
6823 while (sw_cons != sw_prod) {
6824 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6825 sw_cons++;
6826 }
6827 }
6828}
6829
6830static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6831{
6832 int i, j;
6833
555f6c78 6834 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6835 struct bnx2x_fastpath *fp = &bp->fp[j];
6836
a2fbb9ea
ET
6837 for (i = 0; i < NUM_RX_BD; i++) {
6838 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6839 struct sk_buff *skb = rx_buf->skb;
6840
6841 if (skb == NULL)
6842 continue;
6843
6844 pci_unmap_single(bp->pdev,
6845 pci_unmap_addr(rx_buf, mapping),
356e2385 6846 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
6847
6848 rx_buf->skb = NULL;
6849 dev_kfree_skb(skb);
6850 }
7a9b2557 6851 if (!fp->disable_tpa)
32626230
EG
6852 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6853 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6854 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6855 }
6856}
6857
6858static void bnx2x_free_skbs(struct bnx2x *bp)
6859{
6860 bnx2x_free_tx_skbs(bp);
6861 bnx2x_free_rx_skbs(bp);
6862}
6863
6864static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6865{
34f80b04 6866 int i, offset = 1;
a2fbb9ea
ET
6867
6868 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6869 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6870 bp->msix_table[0].vector);
6871
6872 for_each_queue(bp, i) {
c14423fe 6873 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6874 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6875 bnx2x_fp(bp, i, state));
6876
34f80b04 6877 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6878 }
a2fbb9ea
ET
6879}
6880
6881static void bnx2x_free_irq(struct bnx2x *bp)
6882{
a2fbb9ea 6883 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6884 bnx2x_free_msix_irqs(bp);
6885 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6886 bp->flags &= ~USING_MSIX_FLAG;
6887
8badd27a
EG
6888 } else if (bp->flags & USING_MSI_FLAG) {
6889 free_irq(bp->pdev->irq, bp->dev);
6890 pci_disable_msi(bp->pdev);
6891 bp->flags &= ~USING_MSI_FLAG;
6892
a2fbb9ea
ET
6893 } else
6894 free_irq(bp->pdev->irq, bp->dev);
6895}
6896
6897static int bnx2x_enable_msix(struct bnx2x *bp)
6898{
8badd27a
EG
6899 int i, rc, offset = 1;
6900 int igu_vec = 0;
a2fbb9ea 6901
8badd27a
EG
6902 bp->msix_table[0].entry = igu_vec;
6903 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6904
34f80b04 6905 for_each_queue(bp, i) {
8badd27a 6906 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
6907 bp->msix_table[i + offset].entry = igu_vec;
6908 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6909 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6910 }
6911
34f80b04 6912 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6913 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6914 if (rc) {
8badd27a
EG
6915 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6916 return rc;
34f80b04 6917 }
8badd27a 6918
a2fbb9ea
ET
6919 bp->flags |= USING_MSIX_FLAG;
6920
6921 return 0;
a2fbb9ea
ET
6922}
6923
a2fbb9ea
ET
6924static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6925{
34f80b04 6926 int i, rc, offset = 1;
a2fbb9ea 6927
a2fbb9ea
ET
6928 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6929 bp->dev->name, bp->dev);
a2fbb9ea
ET
6930 if (rc) {
6931 BNX2X_ERR("request sp irq failed\n");
6932 return -EBUSY;
6933 }
6934
6935 for_each_queue(bp, i) {
555f6c78
EG
6936 struct bnx2x_fastpath *fp = &bp->fp[i];
6937
ca00392c
EG
6938 if (i < bp->num_rx_queues)
6939 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6940 else
6941 sprintf(fp->name, "%s-tx-%d",
6942 bp->dev->name, i - bp->num_rx_queues);
6943
34f80b04 6944 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6945 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6946 if (rc) {
555f6c78 6947 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
6948 bnx2x_free_msix_irqs(bp);
6949 return -EBUSY;
6950 }
6951
555f6c78 6952 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6953 }
6954
555f6c78 6955 i = BNX2X_NUM_QUEUES(bp);
ca00392c
EG
6956 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
6957 " ... fp[%d] %d\n",
6958 bp->dev->name, bp->msix_table[0].vector,
6959 0, bp->msix_table[offset].vector,
6960 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 6961
a2fbb9ea 6962 return 0;
a2fbb9ea
ET
6963}
6964
8badd27a
EG
6965static int bnx2x_enable_msi(struct bnx2x *bp)
6966{
6967 int rc;
6968
6969 rc = pci_enable_msi(bp->pdev);
6970 if (rc) {
6971 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6972 return -1;
6973 }
6974 bp->flags |= USING_MSI_FLAG;
6975
6976 return 0;
6977}
6978
a2fbb9ea
ET
6979static int bnx2x_req_irq(struct bnx2x *bp)
6980{
8badd27a 6981 unsigned long flags;
34f80b04 6982 int rc;
a2fbb9ea 6983
8badd27a
EG
6984 if (bp->flags & USING_MSI_FLAG)
6985 flags = 0;
6986 else
6987 flags = IRQF_SHARED;
6988
6989 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6990 bp->dev->name, bp->dev);
a2fbb9ea
ET
6991 if (!rc)
6992 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6993
6994 return rc;
a2fbb9ea
ET
6995}
6996
65abd74d
YG
6997static void bnx2x_napi_enable(struct bnx2x *bp)
6998{
6999 int i;
7000
555f6c78 7001 for_each_rx_queue(bp, i)
65abd74d
YG
7002 napi_enable(&bnx2x_fp(bp, i, napi));
7003}
7004
7005static void bnx2x_napi_disable(struct bnx2x *bp)
7006{
7007 int i;
7008
555f6c78 7009 for_each_rx_queue(bp, i)
65abd74d
YG
7010 napi_disable(&bnx2x_fp(bp, i, napi));
7011}
7012
7013static void bnx2x_netif_start(struct bnx2x *bp)
7014{
e1510706
EG
7015 int intr_sem;
7016
7017 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7018 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7019
7020 if (intr_sem) {
65abd74d 7021 if (netif_running(bp->dev)) {
65abd74d
YG
7022 bnx2x_napi_enable(bp);
7023 bnx2x_int_enable(bp);
555f6c78
EG
7024 if (bp->state == BNX2X_STATE_OPEN)
7025 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
7026 }
7027 }
7028}
7029
f8ef6e44 7030static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 7031{
f8ef6e44 7032 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 7033 bnx2x_napi_disable(bp);
762d5f6c
EG
7034 netif_tx_disable(bp->dev);
7035 bp->dev->trans_start = jiffies; /* prevent tx timeout */
65abd74d
YG
7036}
7037
a2fbb9ea
ET
7038/*
7039 * Init service functions
7040 */
7041
e665bfda
MC
7042/**
7043 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7044 *
7045 * @param bp driver descriptor
7046 * @param set set or clear an entry (1 or 0)
7047 * @param mac pointer to a buffer containing a MAC
7048 * @param cl_bit_vec bit vector of clients to register a MAC for
7049 * @param cam_offset offset in a CAM to use
7050 * @param with_bcast set broadcast MAC as well
7051 */
7052static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7053 u32 cl_bit_vec, u8 cam_offset,
7054 u8 with_bcast)
a2fbb9ea
ET
7055{
7056 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 7057 int port = BP_PORT(bp);
a2fbb9ea
ET
7058
7059 /* CAM allocation
7060 * unicasts 0-31:port0 32-63:port1
7061 * multicast 64-127:port0 128-191:port1
7062 */
e665bfda
MC
7063 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7064 config->hdr.offset = cam_offset;
7065 config->hdr.client_id = 0xff;
a2fbb9ea
ET
7066 config->hdr.reserved1 = 0;
7067
7068 /* primary MAC */
7069 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 7070 swab16(*(u16 *)&mac[0]);
a2fbb9ea 7071 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 7072 swab16(*(u16 *)&mac[2]);
a2fbb9ea 7073 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 7074 swab16(*(u16 *)&mac[4]);
34f80b04 7075 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
7076 if (set)
7077 config->config_table[0].target_table_entry.flags = 0;
7078 else
7079 CAM_INVALIDATE(config->config_table[0]);
ca00392c 7080 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 7081 cpu_to_le32(cl_bit_vec);
a2fbb9ea
ET
7082 config->config_table[0].target_table_entry.vlan_id = 0;
7083
3101c2bc
YG
7084 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7085 (set ? "setting" : "clearing"),
a2fbb9ea
ET
7086 config->config_table[0].cam_entry.msb_mac_addr,
7087 config->config_table[0].cam_entry.middle_mac_addr,
7088 config->config_table[0].cam_entry.lsb_mac_addr);
7089
7090 /* broadcast */
e665bfda
MC
7091 if (with_bcast) {
7092 config->config_table[1].cam_entry.msb_mac_addr =
7093 cpu_to_le16(0xffff);
7094 config->config_table[1].cam_entry.middle_mac_addr =
7095 cpu_to_le16(0xffff);
7096 config->config_table[1].cam_entry.lsb_mac_addr =
7097 cpu_to_le16(0xffff);
7098 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7099 if (set)
7100 config->config_table[1].target_table_entry.flags =
7101 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7102 else
7103 CAM_INVALIDATE(config->config_table[1]);
7104 config->config_table[1].target_table_entry.clients_bit_vector =
7105 cpu_to_le32(cl_bit_vec);
7106 config->config_table[1].target_table_entry.vlan_id = 0;
7107 }
a2fbb9ea
ET
7108
7109 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7110 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7111 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7112}
7113
e665bfda
MC
7114/**
7115 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7116 *
7117 * @param bp driver descriptor
7118 * @param set set or clear an entry (1 or 0)
7119 * @param mac pointer to a buffer containing a MAC
7120 * @param cl_bit_vec bit vector of clients to register a MAC for
7121 * @param cam_offset offset in a CAM to use
7122 */
7123static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7124 u32 cl_bit_vec, u8 cam_offset)
34f80b04
EG
7125{
7126 struct mac_configuration_cmd_e1h *config =
7127 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7128
8d9c5f34 7129 config->hdr.length = 1;
e665bfda
MC
7130 config->hdr.offset = cam_offset;
7131 config->hdr.client_id = 0xff;
34f80b04
EG
7132 config->hdr.reserved1 = 0;
7133
7134 /* primary MAC */
7135 config->config_table[0].msb_mac_addr =
e665bfda 7136 swab16(*(u16 *)&mac[0]);
34f80b04 7137 config->config_table[0].middle_mac_addr =
e665bfda 7138 swab16(*(u16 *)&mac[2]);
34f80b04 7139 config->config_table[0].lsb_mac_addr =
e665bfda 7140 swab16(*(u16 *)&mac[4]);
ca00392c 7141 config->config_table[0].clients_bit_vector =
e665bfda 7142 cpu_to_le32(cl_bit_vec);
34f80b04
EG
7143 config->config_table[0].vlan_id = 0;
7144 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
7145 if (set)
7146 config->config_table[0].flags = BP_PORT(bp);
7147 else
7148 config->config_table[0].flags =
7149 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 7150
e665bfda 7151 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 7152 (set ? "setting" : "clearing"),
34f80b04
EG
7153 config->config_table[0].msb_mac_addr,
7154 config->config_table[0].middle_mac_addr,
e665bfda 7155 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
34f80b04
EG
7156
7157 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7158 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7159 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7160}
7161
a2fbb9ea
ET
7162static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7163 int *state_p, int poll)
7164{
7165 /* can take a while if any port is running */
8b3a0f0b 7166 int cnt = 5000;
a2fbb9ea 7167
c14423fe
ET
7168 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7169 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
7170
7171 might_sleep();
34f80b04 7172 while (cnt--) {
a2fbb9ea
ET
7173 if (poll) {
7174 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
7175 /* if index is different from 0
7176 * the reply for some commands will
3101c2bc 7177 * be on the non default queue
a2fbb9ea
ET
7178 */
7179 if (idx)
7180 bnx2x_rx_int(&bp->fp[idx], 10);
7181 }
a2fbb9ea 7182
3101c2bc 7183 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
7184 if (*state_p == state) {
7185#ifdef BNX2X_STOP_ON_ERROR
7186 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7187#endif
a2fbb9ea 7188 return 0;
8b3a0f0b 7189 }
a2fbb9ea 7190
a2fbb9ea 7191 msleep(1);
e3553b29
EG
7192
7193 if (bp->panic)
7194 return -EIO;
a2fbb9ea
ET
7195 }
7196
a2fbb9ea 7197 /* timeout! */
49d66772
ET
7198 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7199 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
7200#ifdef BNX2X_STOP_ON_ERROR
7201 bnx2x_panic();
7202#endif
a2fbb9ea 7203
49d66772 7204 return -EBUSY;
a2fbb9ea
ET
7205}
7206
e665bfda
MC
7207static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7208{
7209 bp->set_mac_pending++;
7210 smp_wmb();
7211
7212 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7213 (1 << bp->fp->cl_id), BP_FUNC(bp));
7214
7215 /* Wait for a completion */
7216 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7217}
7218
7219static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7220{
7221 bp->set_mac_pending++;
7222 smp_wmb();
7223
7224 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7225 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7226 1);
7227
7228 /* Wait for a completion */
7229 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7230}
7231
a2fbb9ea
ET
7232static int bnx2x_setup_leading(struct bnx2x *bp)
7233{
34f80b04 7234 int rc;
a2fbb9ea 7235
c14423fe 7236 /* reset IGU state */
34f80b04 7237 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
7238
7239 /* SETUP ramrod */
7240 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7241
34f80b04
EG
7242 /* Wait for completion */
7243 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7244
34f80b04 7245 return rc;
a2fbb9ea
ET
7246}
7247
7248static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7249{
555f6c78
EG
7250 struct bnx2x_fastpath *fp = &bp->fp[index];
7251
a2fbb9ea 7252 /* reset IGU state */
555f6c78 7253 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7254
228241eb 7255 /* SETUP ramrod */
555f6c78
EG
7256 fp->state = BNX2X_FP_STATE_OPENING;
7257 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7258 fp->cl_id, 0);
a2fbb9ea
ET
7259
7260 /* Wait for completion */
7261 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7262 &(fp->state), 0);
a2fbb9ea
ET
7263}
7264
a2fbb9ea 7265static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7266
ca00392c
EG
7267static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7268 int *num_tx_queues_out)
7269{
7270 int _num_rx_queues = 0, _num_tx_queues = 0;
7271
7272 switch (bp->multi_mode) {
7273 case ETH_RSS_MODE_DISABLED:
7274 _num_rx_queues = 1;
7275 _num_tx_queues = 1;
7276 break;
7277
7278 case ETH_RSS_MODE_REGULAR:
7279 if (num_rx_queues)
7280 _num_rx_queues = min_t(u32, num_rx_queues,
7281 BNX2X_MAX_QUEUES(bp));
7282 else
7283 _num_rx_queues = min_t(u32, num_online_cpus(),
7284 BNX2X_MAX_QUEUES(bp));
7285
7286 if (num_tx_queues)
7287 _num_tx_queues = min_t(u32, num_tx_queues,
7288 BNX2X_MAX_QUEUES(bp));
7289 else
7290 _num_tx_queues = min_t(u32, num_online_cpus(),
7291 BNX2X_MAX_QUEUES(bp));
7292
7293 /* There must be not more Tx queues than Rx queues */
7294 if (_num_tx_queues > _num_rx_queues) {
7295 BNX2X_ERR("number of tx queues (%d) > "
7296 "number of rx queues (%d)"
7297 " defaulting to %d\n",
7298 _num_tx_queues, _num_rx_queues,
7299 _num_rx_queues);
7300 _num_tx_queues = _num_rx_queues;
7301 }
7302 break;
7303
7304
7305 default:
7306 _num_rx_queues = 1;
7307 _num_tx_queues = 1;
7308 break;
7309 }
7310
7311 *num_rx_queues_out = _num_rx_queues;
7312 *num_tx_queues_out = _num_tx_queues;
7313}
7314
7315static int bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 7316{
ca00392c 7317 int rc = 0;
a2fbb9ea 7318
8badd27a
EG
7319 switch (int_mode) {
7320 case INT_MODE_INTx:
7321 case INT_MODE_MSI:
ca00392c
EG
7322 bp->num_rx_queues = 1;
7323 bp->num_tx_queues = 1;
7324 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
8badd27a
EG
7325 break;
7326
7327 case INT_MODE_MSIX:
7328 default:
ca00392c
EG
7329 /* Set interrupt mode according to bp->multi_mode value */
7330 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7331 &bp->num_tx_queues);
7332
7333 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
555f6c78 7334 bp->num_rx_queues, bp->num_tx_queues);
ca00392c 7335
2dfe0e1f
EG
7336 /* if we can't use MSI-X we only need one fp,
7337 * so try to enable MSI-X with the requested number of fp's
7338 * and fallback to MSI or legacy INTx with one fp
7339 */
ca00392c
EG
7340 rc = bnx2x_enable_msix(bp);
7341 if (rc) {
34f80b04 7342 /* failed to enable MSI-X */
555f6c78
EG
7343 if (bp->multi_mode)
7344 BNX2X_ERR("Multi requested but failed to "
ca00392c
EG
7345 "enable MSI-X (rx %d tx %d), "
7346 "set number of queues to 1\n",
7347 bp->num_rx_queues, bp->num_tx_queues);
7348 bp->num_rx_queues = 1;
7349 bp->num_tx_queues = 1;
a2fbb9ea 7350 }
8badd27a 7351 break;
a2fbb9ea 7352 }
555f6c78 7353 bp->dev->real_num_tx_queues = bp->num_tx_queues;
ca00392c 7354 return rc;
8badd27a
EG
7355}
7356
8badd27a
EG
7357
7358/* must be called with rtnl_lock */
7359static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7360{
7361 u32 load_code;
ca00392c
EG
7362 int i, rc;
7363
8badd27a 7364#ifdef BNX2X_STOP_ON_ERROR
8badd27a
EG
7365 if (unlikely(bp->panic))
7366 return -EPERM;
7367#endif
7368
7369 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7370
ca00392c 7371 rc = bnx2x_set_int_mode(bp);
c14423fe 7372
a2fbb9ea
ET
7373 if (bnx2x_alloc_mem(bp))
7374 return -ENOMEM;
7375
555f6c78 7376 for_each_rx_queue(bp, i)
7a9b2557
VZ
7377 bnx2x_fp(bp, i, disable_tpa) =
7378 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7379
555f6c78 7380 for_each_rx_queue(bp, i)
2dfe0e1f
EG
7381 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7382 bnx2x_poll, 128);
7383
2dfe0e1f
EG
7384 bnx2x_napi_enable(bp);
7385
34f80b04
EG
7386 if (bp->flags & USING_MSIX_FLAG) {
7387 rc = bnx2x_req_msix_irqs(bp);
7388 if (rc) {
7389 pci_disable_msix(bp->pdev);
2dfe0e1f 7390 goto load_error1;
34f80b04
EG
7391 }
7392 } else {
ca00392c
EG
7393 /* Fall to INTx if failed to enable MSI-X due to lack of
7394 memory (in bnx2x_set_int_mode()) */
8badd27a
EG
7395 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7396 bnx2x_enable_msi(bp);
34f80b04
EG
7397 bnx2x_ack_int(bp);
7398 rc = bnx2x_req_irq(bp);
7399 if (rc) {
2dfe0e1f 7400 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a
EG
7401 if (bp->flags & USING_MSI_FLAG)
7402 pci_disable_msi(bp->pdev);
2dfe0e1f 7403 goto load_error1;
a2fbb9ea 7404 }
8badd27a
EG
7405 if (bp->flags & USING_MSI_FLAG) {
7406 bp->dev->irq = bp->pdev->irq;
7407 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7408 bp->dev->name, bp->pdev->irq);
7409 }
a2fbb9ea
ET
7410 }
7411
2dfe0e1f
EG
7412 /* Send LOAD_REQUEST command to MCP
7413 Returns the type of LOAD command:
7414 if it is the first port to be initialized
7415 common blocks should be initialized, otherwise - not
7416 */
7417 if (!BP_NOMCP(bp)) {
7418 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7419 if (!load_code) {
7420 BNX2X_ERR("MCP response failure, aborting\n");
7421 rc = -EBUSY;
7422 goto load_error2;
7423 }
7424 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7425 rc = -EBUSY; /* other port in diagnostic mode */
7426 goto load_error2;
7427 }
7428
7429 } else {
7430 int port = BP_PORT(bp);
7431
f5372251 7432 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f
EG
7433 load_count[0], load_count[1], load_count[2]);
7434 load_count[0]++;
7435 load_count[1 + port]++;
f5372251 7436 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f
EG
7437 load_count[0], load_count[1], load_count[2]);
7438 if (load_count[0] == 1)
7439 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7440 else if (load_count[1 + port] == 1)
7441 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7442 else
7443 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7444 }
7445
7446 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7447 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7448 bp->port.pmf = 1;
7449 else
7450 bp->port.pmf = 0;
7451 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7452
a2fbb9ea 7453 /* Initialize HW */
34f80b04
EG
7454 rc = bnx2x_init_hw(bp, load_code);
7455 if (rc) {
a2fbb9ea 7456 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 7457 goto load_error2;
a2fbb9ea
ET
7458 }
7459
a2fbb9ea 7460 /* Setup NIC internals and enable interrupts */
471de716 7461 bnx2x_nic_init(bp, load_code);
a2fbb9ea 7462
2691d51d
EG
7463 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7464 (bp->common.shmem2_base))
7465 SHMEM2_WR(bp, dcc_support,
7466 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7467 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7468
a2fbb9ea 7469 /* Send LOAD_DONE command to MCP */
34f80b04 7470 if (!BP_NOMCP(bp)) {
228241eb
ET
7471 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7472 if (!load_code) {
da5a662a 7473 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 7474 rc = -EBUSY;
2dfe0e1f 7475 goto load_error3;
a2fbb9ea
ET
7476 }
7477 }
7478
7479 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7480
34f80b04
EG
7481 rc = bnx2x_setup_leading(bp);
7482 if (rc) {
da5a662a 7483 BNX2X_ERR("Setup leading failed!\n");
e3553b29 7484#ifndef BNX2X_STOP_ON_ERROR
2dfe0e1f 7485 goto load_error3;
e3553b29
EG
7486#else
7487 bp->panic = 1;
7488 return -EBUSY;
7489#endif
34f80b04 7490 }
a2fbb9ea 7491
34f80b04
EG
7492 if (CHIP_IS_E1H(bp))
7493 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 7494 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
34f80b04
EG
7495 bp->state = BNX2X_STATE_DISABLED;
7496 }
a2fbb9ea 7497
ca00392c 7498 if (bp->state == BNX2X_STATE_OPEN) {
34f80b04
EG
7499 for_each_nondefault_queue(bp, i) {
7500 rc = bnx2x_setup_multi(bp, i);
7501 if (rc)
2dfe0e1f 7502 goto load_error3;
34f80b04 7503 }
a2fbb9ea 7504
ca00392c 7505 if (CHIP_IS_E1(bp))
e665bfda 7506 bnx2x_set_eth_mac_addr_e1(bp, 1);
ca00392c 7507 else
e665bfda 7508 bnx2x_set_eth_mac_addr_e1h(bp, 1);
ca00392c 7509 }
34f80b04
EG
7510
7511 if (bp->port.pmf)
b5bf9068 7512 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea
ET
7513
7514 /* Start fast path */
34f80b04
EG
7515 switch (load_mode) {
7516 case LOAD_NORMAL:
ca00392c
EG
7517 if (bp->state == BNX2X_STATE_OPEN) {
7518 /* Tx queue should be only reenabled */
7519 netif_tx_wake_all_queues(bp->dev);
7520 }
2dfe0e1f 7521 /* Initialize the receive filter. */
34f80b04
EG
7522 bnx2x_set_rx_mode(bp->dev);
7523 break;
7524
7525 case LOAD_OPEN:
555f6c78 7526 netif_tx_start_all_queues(bp->dev);
ca00392c
EG
7527 if (bp->state != BNX2X_STATE_OPEN)
7528 netif_tx_disable(bp->dev);
2dfe0e1f 7529 /* Initialize the receive filter. */
34f80b04 7530 bnx2x_set_rx_mode(bp->dev);
34f80b04 7531 break;
a2fbb9ea 7532
34f80b04 7533 case LOAD_DIAG:
2dfe0e1f 7534 /* Initialize the receive filter. */
a2fbb9ea 7535 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
7536 bp->state = BNX2X_STATE_DIAG;
7537 break;
7538
7539 default:
7540 break;
a2fbb9ea
ET
7541 }
7542
34f80b04
EG
7543 if (!bp->port.pmf)
7544 bnx2x__link_status_update(bp);
7545
a2fbb9ea
ET
7546 /* start the timer */
7547 mod_timer(&bp->timer, jiffies + bp->current_interval);
7548
34f80b04 7549
a2fbb9ea
ET
7550 return 0;
7551
2dfe0e1f
EG
7552load_error3:
7553 bnx2x_int_disable_sync(bp, 1);
7554 if (!BP_NOMCP(bp)) {
7555 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7556 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7557 }
7558 bp->port.pmf = 0;
7a9b2557
VZ
7559 /* Free SKBs, SGEs, TPA pool and driver internals */
7560 bnx2x_free_skbs(bp);
555f6c78 7561 for_each_rx_queue(bp, i)
3196a88a 7562 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 7563load_error2:
d1014634
YG
7564 /* Release IRQs */
7565 bnx2x_free_irq(bp);
2dfe0e1f
EG
7566load_error1:
7567 bnx2x_napi_disable(bp);
555f6c78 7568 for_each_rx_queue(bp, i)
7cde1c8b 7569 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7570 bnx2x_free_mem(bp);
7571
34f80b04 7572 return rc;
a2fbb9ea
ET
7573}
7574
7575static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7576{
555f6c78 7577 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
7578 int rc;
7579
c14423fe 7580 /* halt the connection */
555f6c78
EG
7581 fp->state = BNX2X_FP_STATE_HALTING;
7582 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7583
34f80b04 7584 /* Wait for completion */
a2fbb9ea 7585 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7586 &(fp->state), 1);
c14423fe 7587 if (rc) /* timeout */
a2fbb9ea
ET
7588 return rc;
7589
7590 /* delete cfc entry */
7591 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7592
34f80b04
EG
7593 /* Wait for completion */
7594 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7595 &(fp->state), 1);
34f80b04 7596 return rc;
a2fbb9ea
ET
7597}
7598
da5a662a 7599static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7600{
4781bfad 7601 __le16 dsb_sp_prod_idx;
c14423fe 7602 /* if the other port is handling traffic,
a2fbb9ea 7603 this can take a lot of time */
34f80b04
EG
7604 int cnt = 500;
7605 int rc;
a2fbb9ea
ET
7606
7607 might_sleep();
7608
7609 /* Send HALT ramrod */
7610 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7611 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7612
34f80b04
EG
7613 /* Wait for completion */
7614 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7615 &(bp->fp[0].state), 1);
7616 if (rc) /* timeout */
da5a662a 7617 return rc;
a2fbb9ea 7618
49d66772 7619 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7620
228241eb 7621 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
7622 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7623
49d66772 7624 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
7625 we are going to reset the chip anyway
7626 so there is not much to do if this times out
7627 */
34f80b04 7628 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
7629 if (!cnt) {
7630 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7631 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7632 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7633#ifdef BNX2X_STOP_ON_ERROR
7634 bnx2x_panic();
7635#endif
36e552ab 7636 rc = -EBUSY;
34f80b04
EG
7637 break;
7638 }
7639 cnt--;
da5a662a 7640 msleep(1);
5650d9d4 7641 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
7642 }
7643 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7644 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
7645
7646 return rc;
a2fbb9ea
ET
7647}
7648
34f80b04
EG
7649static void bnx2x_reset_func(struct bnx2x *bp)
7650{
7651 int port = BP_PORT(bp);
7652 int func = BP_FUNC(bp);
7653 int base, i;
7654
7655 /* Configure IGU */
7656 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7657 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7658
34f80b04
EG
7659 /* Clear ILT */
7660 base = FUNC_ILT_BASE(func);
7661 for (i = base; i < base + ILT_PER_FUNC; i++)
7662 bnx2x_ilt_wr(bp, i, 0);
7663}
7664
7665static void bnx2x_reset_port(struct bnx2x *bp)
7666{
7667 int port = BP_PORT(bp);
7668 u32 val;
7669
7670 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7671
7672 /* Do not rcv packets to BRB */
7673 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7674 /* Do not direct rcv packets that are not for MCP to the BRB */
7675 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7676 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7677
7678 /* Configure AEU */
7679 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7680
7681 msleep(100);
7682 /* Check for BRB port occupancy */
7683 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7684 if (val)
7685 DP(NETIF_MSG_IFDOWN,
33471629 7686 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
7687
7688 /* TODO: Close Doorbell port? */
7689}
7690
34f80b04
EG
7691static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7692{
7693 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7694 BP_FUNC(bp), reset_code);
7695
7696 switch (reset_code) {
7697 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7698 bnx2x_reset_port(bp);
7699 bnx2x_reset_func(bp);
7700 bnx2x_reset_common(bp);
7701 break;
7702
7703 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7704 bnx2x_reset_port(bp);
7705 bnx2x_reset_func(bp);
7706 break;
7707
7708 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7709 bnx2x_reset_func(bp);
7710 break;
49d66772 7711
34f80b04
EG
7712 default:
7713 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7714 break;
7715 }
7716}
7717
33471629 7718/* must be called with rtnl_lock */
34f80b04 7719static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7720{
da5a662a 7721 int port = BP_PORT(bp);
a2fbb9ea 7722 u32 reset_code = 0;
da5a662a 7723 int i, cnt, rc;
a2fbb9ea
ET
7724
7725 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7726
ab6ad5a4 7727 /* Set "drop all" */
228241eb
ET
7728 bp->rx_mode = BNX2X_RX_MODE_NONE;
7729 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7730
ab6ad5a4 7731 /* Disable HW interrupts, NAPI and Tx */
f8ef6e44 7732 bnx2x_netif_stop(bp, 1);
e94d8af3 7733
34f80b04
EG
7734 del_timer_sync(&bp->timer);
7735 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7736 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7737 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7738
70b9986c
EG
7739 /* Release IRQs */
7740 bnx2x_free_irq(bp);
7741
555f6c78
EG
7742 /* Wait until tx fastpath tasks complete */
7743 for_each_tx_queue(bp, i) {
228241eb
ET
7744 struct bnx2x_fastpath *fp = &bp->fp[i];
7745
34f80b04 7746 cnt = 1000;
e8b5fc51 7747 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7748
7961f791 7749 bnx2x_tx_int(fp);
34f80b04
EG
7750 if (!cnt) {
7751 BNX2X_ERR("timeout waiting for queue[%d]\n",
7752 i);
7753#ifdef BNX2X_STOP_ON_ERROR
7754 bnx2x_panic();
7755 return -EBUSY;
7756#else
7757 break;
7758#endif
7759 }
7760 cnt--;
da5a662a 7761 msleep(1);
34f80b04 7762 }
228241eb 7763 }
da5a662a
VZ
7764 /* Give HW time to discard old tx messages */
7765 msleep(1);
a2fbb9ea 7766
3101c2bc
YG
7767 if (CHIP_IS_E1(bp)) {
7768 struct mac_configuration_cmd *config =
7769 bnx2x_sp(bp, mcast_config);
7770
e665bfda 7771 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 7772
8d9c5f34 7773 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
7774 CAM_INVALIDATE(config->config_table[i]);
7775
8d9c5f34 7776 config->hdr.length = i;
3101c2bc
YG
7777 if (CHIP_REV_IS_SLOW(bp))
7778 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7779 else
7780 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 7781 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
7782 config->hdr.reserved1 = 0;
7783
e665bfda
MC
7784 bp->set_mac_pending++;
7785 smp_wmb();
7786
3101c2bc
YG
7787 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7788 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7789 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7790
7791 } else { /* E1H */
65abd74d
YG
7792 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7793
e665bfda 7794 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
7795
7796 for (i = 0; i < MC_HASH_SIZE; i++)
7797 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
7798
7799 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc
YG
7800 }
7801
65abd74d
YG
7802 if (unload_mode == UNLOAD_NORMAL)
7803 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7804
7d0446c2 7805 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7806 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7807
7d0446c2 7808 else if (bp->wol) {
65abd74d
YG
7809 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7810 u8 *mac_addr = bp->dev->dev_addr;
7811 u32 val;
7812 /* The mac address is written to entries 1-4 to
7813 preserve entry 0 which is used by the PMF */
7814 u8 entry = (BP_E1HVN(bp) + 1)*8;
7815
7816 val = (mac_addr[0] << 8) | mac_addr[1];
7817 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7818
7819 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7820 (mac_addr[4] << 8) | mac_addr[5];
7821 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7822
7823 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7824
7825 } else
7826 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7827
34f80b04
EG
7828 /* Close multi and leading connections
7829 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
7830 for_each_nondefault_queue(bp, i)
7831 if (bnx2x_stop_multi(bp, i))
228241eb 7832 goto unload_error;
a2fbb9ea 7833
da5a662a
VZ
7834 rc = bnx2x_stop_leading(bp);
7835 if (rc) {
34f80b04 7836 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7837#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7838 return -EBUSY;
da5a662a
VZ
7839#else
7840 goto unload_error;
34f80b04 7841#endif
228241eb
ET
7842 }
7843
7844unload_error:
34f80b04 7845 if (!BP_NOMCP(bp))
228241eb 7846 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 7847 else {
f5372251 7848 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
7849 load_count[0], load_count[1], load_count[2]);
7850 load_count[0]--;
da5a662a 7851 load_count[1 + port]--;
f5372251 7852 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
7853 load_count[0], load_count[1], load_count[2]);
7854 if (load_count[0] == 0)
7855 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7856 else if (load_count[1 + port] == 0)
34f80b04
EG
7857 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7858 else
7859 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7860 }
a2fbb9ea 7861
34f80b04
EG
7862 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7863 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7864 bnx2x__link_reset(bp);
a2fbb9ea
ET
7865
7866 /* Reset the chip */
228241eb 7867 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7868
7869 /* Report UNLOAD_DONE to MCP */
34f80b04 7870 if (!BP_NOMCP(bp))
a2fbb9ea 7871 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 7872
9a035440 7873 bp->port.pmf = 0;
a2fbb9ea 7874
7a9b2557 7875 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7876 bnx2x_free_skbs(bp);
555f6c78 7877 for_each_rx_queue(bp, i)
3196a88a 7878 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7879 for_each_rx_queue(bp, i)
7cde1c8b 7880 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7881 bnx2x_free_mem(bp);
7882
7883 bp->state = BNX2X_STATE_CLOSED;
228241eb 7884
a2fbb9ea
ET
7885 netif_carrier_off(bp->dev);
7886
7887 return 0;
7888}
7889
34f80b04
EG
7890static void bnx2x_reset_task(struct work_struct *work)
7891{
7892 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7893
7894#ifdef BNX2X_STOP_ON_ERROR
7895 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7896 " so reset not done to allow debug dump,\n"
ad361c98 7897 " you will need to reboot when done\n");
34f80b04
EG
7898 return;
7899#endif
7900
7901 rtnl_lock();
7902
7903 if (!netif_running(bp->dev))
7904 goto reset_task_exit;
7905
7906 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7907 bnx2x_nic_load(bp, LOAD_NORMAL);
7908
7909reset_task_exit:
7910 rtnl_unlock();
7911}
7912
a2fbb9ea
ET
7913/* end of nic load/unload */
7914
7915/* ethtool_ops */
7916
7917/*
7918 * Init service functions
7919 */
7920
f1ef27ef
EG
7921static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7922{
7923 switch (func) {
7924 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7925 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7926 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7927 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7928 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7929 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7930 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7931 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7932 default:
7933 BNX2X_ERR("Unsupported function index: %d\n", func);
7934 return (u32)(-1);
7935 }
7936}
7937
7938static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7939{
7940 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7941
7942 /* Flush all outstanding writes */
7943 mmiowb();
7944
7945 /* Pretend to be function 0 */
7946 REG_WR(bp, reg, 0);
7947 /* Flush the GRC transaction (in the chip) */
7948 new_val = REG_RD(bp, reg);
7949 if (new_val != 0) {
7950 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7951 new_val);
7952 BUG();
7953 }
7954
7955 /* From now we are in the "like-E1" mode */
7956 bnx2x_int_disable(bp);
7957
7958 /* Flush all outstanding writes */
7959 mmiowb();
7960
7961 /* Restore the original funtion settings */
7962 REG_WR(bp, reg, orig_func);
7963 new_val = REG_RD(bp, reg);
7964 if (new_val != orig_func) {
7965 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7966 orig_func, new_val);
7967 BUG();
7968 }
7969}
7970
7971static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7972{
7973 if (CHIP_IS_E1H(bp))
7974 bnx2x_undi_int_disable_e1h(bp, func);
7975 else
7976 bnx2x_int_disable(bp);
7977}
7978
34f80b04
EG
7979static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7980{
7981 u32 val;
7982
7983 /* Check if there is any driver already loaded */
7984 val = REG_RD(bp, MISC_REG_UNPREPARED);
7985 if (val == 0x1) {
7986 /* Check if it is the UNDI driver
7987 * UNDI driver initializes CID offset for normal bell to 0x7
7988 */
4a37fb66 7989 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7990 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7991 if (val == 0x7) {
7992 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7993 /* save our func */
34f80b04 7994 int func = BP_FUNC(bp);
da5a662a
VZ
7995 u32 swap_en;
7996 u32 swap_val;
34f80b04 7997
b4661739
EG
7998 /* clear the UNDI indication */
7999 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8000
34f80b04
EG
8001 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8002
8003 /* try unload UNDI on port 0 */
8004 bp->func = 0;
da5a662a
VZ
8005 bp->fw_seq =
8006 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8007 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 8008 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
8009
8010 /* if UNDI is loaded on the other port */
8011 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8012
da5a662a
VZ
8013 /* send "DONE" for previous unload */
8014 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8015
8016 /* unload UNDI on port 1 */
34f80b04 8017 bp->func = 1;
da5a662a
VZ
8018 bp->fw_seq =
8019 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8020 DRV_MSG_SEQ_NUMBER_MASK);
8021 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8022
8023 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
8024 }
8025
b4661739
EG
8026 /* now it's safe to release the lock */
8027 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8028
f1ef27ef 8029 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
8030
8031 /* close input traffic and wait for it */
8032 /* Do not rcv packets to BRB */
8033 REG_WR(bp,
8034 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8035 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8036 /* Do not direct rcv packets that are not for MCP to
8037 * the BRB */
8038 REG_WR(bp,
8039 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8040 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8041 /* clear AEU */
8042 REG_WR(bp,
8043 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8044 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8045 msleep(10);
8046
8047 /* save NIG port swap info */
8048 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8049 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
8050 /* reset device */
8051 REG_WR(bp,
8052 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 8053 0xd3ffffff);
34f80b04
EG
8054 REG_WR(bp,
8055 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8056 0x1403);
da5a662a
VZ
8057 /* take the NIG out of reset and restore swap values */
8058 REG_WR(bp,
8059 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8060 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8061 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8062 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8063
8064 /* send unload done to the MCP */
8065 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8066
8067 /* restore our func and fw_seq */
8068 bp->func = func;
8069 bp->fw_seq =
8070 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8071 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
8072
8073 } else
8074 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
8075 }
8076}
8077
8078static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8079{
8080 u32 val, val2, val3, val4, id;
72ce58c3 8081 u16 pmc;
34f80b04
EG
8082
8083 /* Get the chip revision id and number. */
8084 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8085 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8086 id = ((val & 0xffff) << 16);
8087 val = REG_RD(bp, MISC_REG_CHIP_REV);
8088 id |= ((val & 0xf) << 12);
8089 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8090 id |= ((val & 0xff) << 4);
5a40e08e 8091 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
8092 id |= (val & 0xf);
8093 bp->common.chip_id = id;
8094 bp->link_params.chip_id = bp->common.chip_id;
8095 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8096
1c06328c
EG
8097 val = (REG_RD(bp, 0x2874) & 0x55);
8098 if ((bp->common.chip_id & 0x1) ||
8099 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8100 bp->flags |= ONE_PORT_FLAG;
8101 BNX2X_DEV_INFO("single port device\n");
8102 }
8103
34f80b04
EG
8104 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8105 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8106 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8107 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8108 bp->common.flash_size, bp->common.flash_size);
8109
8110 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 8111 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 8112 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
8113 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8114 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
8115
8116 if (!bp->common.shmem_base ||
8117 (bp->common.shmem_base < 0xA0000) ||
8118 (bp->common.shmem_base >= 0xC0000)) {
8119 BNX2X_DEV_INFO("MCP not active\n");
8120 bp->flags |= NO_MCP_FLAG;
8121 return;
8122 }
8123
8124 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8125 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8126 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8127 BNX2X_ERR("BAD MCP validity signature\n");
8128
8129 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 8130 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
8131
8132 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8133 SHARED_HW_CFG_LED_MODE_MASK) >>
8134 SHARED_HW_CFG_LED_MODE_SHIFT);
8135
c2c8b03e
EG
8136 bp->link_params.feature_config_flags = 0;
8137 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8138 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8139 bp->link_params.feature_config_flags |=
8140 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8141 else
8142 bp->link_params.feature_config_flags &=
8143 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8144
34f80b04
EG
8145 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8146 bp->common.bc_ver = val;
8147 BNX2X_DEV_INFO("bc_ver %X\n", val);
8148 if (val < BNX2X_BC_VER) {
8149 /* for now only warn
8150 * later we might need to enforce this */
8151 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8152 " please upgrade BC\n", BNX2X_BC_VER, val);
8153 }
4d295db0
EG
8154 bp->link_params.feature_config_flags |=
8155 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8156 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
8157
8158 if (BP_E1HVN(bp) == 0) {
8159 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8160 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8161 } else {
8162 /* no WOL capability for E1HVN != 0 */
8163 bp->flags |= NO_WOL_FLAG;
8164 }
8165 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 8166 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
8167
8168 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8169 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8170 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8171 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8172
8173 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8174 val, val2, val3, val4);
8175}
8176
8177static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8178 u32 switch_cfg)
a2fbb9ea 8179{
34f80b04 8180 int port = BP_PORT(bp);
a2fbb9ea
ET
8181 u32 ext_phy_type;
8182
a2fbb9ea
ET
8183 switch (switch_cfg) {
8184 case SWITCH_CFG_1G:
8185 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8186
c18487ee
YR
8187 ext_phy_type =
8188 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
8189 switch (ext_phy_type) {
8190 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8191 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8192 ext_phy_type);
8193
34f80b04
EG
8194 bp->port.supported |= (SUPPORTED_10baseT_Half |
8195 SUPPORTED_10baseT_Full |
8196 SUPPORTED_100baseT_Half |
8197 SUPPORTED_100baseT_Full |
8198 SUPPORTED_1000baseT_Full |
8199 SUPPORTED_2500baseX_Full |
8200 SUPPORTED_TP |
8201 SUPPORTED_FIBRE |
8202 SUPPORTED_Autoneg |
8203 SUPPORTED_Pause |
8204 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8205 break;
8206
8207 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8208 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8209 ext_phy_type);
8210
34f80b04
EG
8211 bp->port.supported |= (SUPPORTED_10baseT_Half |
8212 SUPPORTED_10baseT_Full |
8213 SUPPORTED_100baseT_Half |
8214 SUPPORTED_100baseT_Full |
8215 SUPPORTED_1000baseT_Full |
8216 SUPPORTED_TP |
8217 SUPPORTED_FIBRE |
8218 SUPPORTED_Autoneg |
8219 SUPPORTED_Pause |
8220 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8221 break;
8222
8223 default:
8224 BNX2X_ERR("NVRAM config error. "
8225 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 8226 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8227 return;
8228 }
8229
34f80b04
EG
8230 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8231 port*0x10);
8232 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
8233 break;
8234
8235 case SWITCH_CFG_10G:
8236 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8237
c18487ee
YR
8238 ext_phy_type =
8239 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
8240 switch (ext_phy_type) {
8241 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8242 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8243 ext_phy_type);
8244
34f80b04
EG
8245 bp->port.supported |= (SUPPORTED_10baseT_Half |
8246 SUPPORTED_10baseT_Full |
8247 SUPPORTED_100baseT_Half |
8248 SUPPORTED_100baseT_Full |
8249 SUPPORTED_1000baseT_Full |
8250 SUPPORTED_2500baseX_Full |
8251 SUPPORTED_10000baseT_Full |
8252 SUPPORTED_TP |
8253 SUPPORTED_FIBRE |
8254 SUPPORTED_Autoneg |
8255 SUPPORTED_Pause |
8256 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8257 break;
8258
589abe3a
EG
8259 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8260 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 8261 ext_phy_type);
f1410647 8262
34f80b04 8263 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8264 SUPPORTED_1000baseT_Full |
34f80b04 8265 SUPPORTED_FIBRE |
589abe3a 8266 SUPPORTED_Autoneg |
34f80b04
EG
8267 SUPPORTED_Pause |
8268 SUPPORTED_Asym_Pause);
f1410647
ET
8269 break;
8270
589abe3a
EG
8271 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8272 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
8273 ext_phy_type);
8274
34f80b04 8275 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8276 SUPPORTED_2500baseX_Full |
34f80b04 8277 SUPPORTED_1000baseT_Full |
589abe3a
EG
8278 SUPPORTED_FIBRE |
8279 SUPPORTED_Autoneg |
8280 SUPPORTED_Pause |
8281 SUPPORTED_Asym_Pause);
8282 break;
8283
8284 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8285 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8286 ext_phy_type);
8287
8288 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
8289 SUPPORTED_FIBRE |
8290 SUPPORTED_Pause |
8291 SUPPORTED_Asym_Pause);
f1410647
ET
8292 break;
8293
589abe3a
EG
8294 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8295 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
8296 ext_phy_type);
8297
34f80b04
EG
8298 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8299 SUPPORTED_1000baseT_Full |
8300 SUPPORTED_FIBRE |
34f80b04
EG
8301 SUPPORTED_Pause |
8302 SUPPORTED_Asym_Pause);
f1410647
ET
8303 break;
8304
589abe3a
EG
8305 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8306 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
8307 ext_phy_type);
8308
34f80b04 8309 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 8310 SUPPORTED_1000baseT_Full |
34f80b04 8311 SUPPORTED_Autoneg |
589abe3a 8312 SUPPORTED_FIBRE |
34f80b04
EG
8313 SUPPORTED_Pause |
8314 SUPPORTED_Asym_Pause);
c18487ee
YR
8315 break;
8316
4d295db0
EG
8317 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8318 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8319 ext_phy_type);
8320
8321 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8322 SUPPORTED_1000baseT_Full |
8323 SUPPORTED_Autoneg |
8324 SUPPORTED_FIBRE |
8325 SUPPORTED_Pause |
8326 SUPPORTED_Asym_Pause);
8327 break;
8328
f1410647
ET
8329 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8330 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8331 ext_phy_type);
8332
34f80b04
EG
8333 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8334 SUPPORTED_TP |
8335 SUPPORTED_Autoneg |
8336 SUPPORTED_Pause |
8337 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8338 break;
8339
28577185
EG
8340 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8341 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8342 ext_phy_type);
8343
8344 bp->port.supported |= (SUPPORTED_10baseT_Half |
8345 SUPPORTED_10baseT_Full |
8346 SUPPORTED_100baseT_Half |
8347 SUPPORTED_100baseT_Full |
8348 SUPPORTED_1000baseT_Full |
8349 SUPPORTED_10000baseT_Full |
8350 SUPPORTED_TP |
8351 SUPPORTED_Autoneg |
8352 SUPPORTED_Pause |
8353 SUPPORTED_Asym_Pause);
8354 break;
8355
c18487ee
YR
8356 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8357 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8358 bp->link_params.ext_phy_config);
8359 break;
8360
a2fbb9ea
ET
8361 default:
8362 BNX2X_ERR("NVRAM config error. "
8363 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 8364 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8365 return;
8366 }
8367
34f80b04
EG
8368 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8369 port*0x18);
8370 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 8371
a2fbb9ea
ET
8372 break;
8373
8374 default:
8375 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 8376 bp->port.link_config);
a2fbb9ea
ET
8377 return;
8378 }
34f80b04 8379 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
8380
8381 /* mask what we support according to speed_cap_mask */
c18487ee
YR
8382 if (!(bp->link_params.speed_cap_mask &
8383 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 8384 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8385
c18487ee
YR
8386 if (!(bp->link_params.speed_cap_mask &
8387 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 8388 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8389
c18487ee
YR
8390 if (!(bp->link_params.speed_cap_mask &
8391 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 8392 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8393
c18487ee
YR
8394 if (!(bp->link_params.speed_cap_mask &
8395 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 8396 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8397
c18487ee
YR
8398 if (!(bp->link_params.speed_cap_mask &
8399 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
8400 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8401 SUPPORTED_1000baseT_Full);
a2fbb9ea 8402
c18487ee
YR
8403 if (!(bp->link_params.speed_cap_mask &
8404 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 8405 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8406
c18487ee
YR
8407 if (!(bp->link_params.speed_cap_mask &
8408 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 8409 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 8410
34f80b04 8411 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
8412}
8413
34f80b04 8414static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8415{
c18487ee 8416 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 8417
34f80b04 8418 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 8419 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 8420 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 8421 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8422 bp->port.advertising = bp->port.supported;
a2fbb9ea 8423 } else {
c18487ee
YR
8424 u32 ext_phy_type =
8425 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8426
8427 if ((ext_phy_type ==
8428 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8429 (ext_phy_type ==
8430 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 8431 /* force 10G, no AN */
c18487ee 8432 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 8433 bp->port.advertising =
a2fbb9ea
ET
8434 (ADVERTISED_10000baseT_Full |
8435 ADVERTISED_FIBRE);
8436 break;
8437 }
8438 BNX2X_ERR("NVRAM config error. "
8439 "Invalid link_config 0x%x"
8440 " Autoneg not supported\n",
34f80b04 8441 bp->port.link_config);
a2fbb9ea
ET
8442 return;
8443 }
8444 break;
8445
8446 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 8447 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 8448 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
8449 bp->port.advertising = (ADVERTISED_10baseT_Full |
8450 ADVERTISED_TP);
a2fbb9ea
ET
8451 } else {
8452 BNX2X_ERR("NVRAM config error. "
8453 "Invalid link_config 0x%x"
8454 " speed_cap_mask 0x%x\n",
34f80b04 8455 bp->port.link_config,
c18487ee 8456 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8457 return;
8458 }
8459 break;
8460
8461 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 8462 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
8463 bp->link_params.req_line_speed = SPEED_10;
8464 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8465 bp->port.advertising = (ADVERTISED_10baseT_Half |
8466 ADVERTISED_TP);
a2fbb9ea
ET
8467 } else {
8468 BNX2X_ERR("NVRAM config error. "
8469 "Invalid link_config 0x%x"
8470 " speed_cap_mask 0x%x\n",
34f80b04 8471 bp->port.link_config,
c18487ee 8472 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8473 return;
8474 }
8475 break;
8476
8477 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 8478 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 8479 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
8480 bp->port.advertising = (ADVERTISED_100baseT_Full |
8481 ADVERTISED_TP);
a2fbb9ea
ET
8482 } else {
8483 BNX2X_ERR("NVRAM config error. "
8484 "Invalid link_config 0x%x"
8485 " speed_cap_mask 0x%x\n",
34f80b04 8486 bp->port.link_config,
c18487ee 8487 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8488 return;
8489 }
8490 break;
8491
8492 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 8493 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
8494 bp->link_params.req_line_speed = SPEED_100;
8495 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8496 bp->port.advertising = (ADVERTISED_100baseT_Half |
8497 ADVERTISED_TP);
a2fbb9ea
ET
8498 } else {
8499 BNX2X_ERR("NVRAM config error. "
8500 "Invalid link_config 0x%x"
8501 " speed_cap_mask 0x%x\n",
34f80b04 8502 bp->port.link_config,
c18487ee 8503 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8504 return;
8505 }
8506 break;
8507
8508 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 8509 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 8510 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
8511 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8512 ADVERTISED_TP);
a2fbb9ea
ET
8513 } else {
8514 BNX2X_ERR("NVRAM config error. "
8515 "Invalid link_config 0x%x"
8516 " speed_cap_mask 0x%x\n",
34f80b04 8517 bp->port.link_config,
c18487ee 8518 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8519 return;
8520 }
8521 break;
8522
8523 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8524 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8525 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
8526 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8527 ADVERTISED_TP);
a2fbb9ea
ET
8528 } else {
8529 BNX2X_ERR("NVRAM config error. "
8530 "Invalid link_config 0x%x"
8531 " speed_cap_mask 0x%x\n",
34f80b04 8532 bp->port.link_config,
c18487ee 8533 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8534 return;
8535 }
8536 break;
8537
8538 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8539 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8540 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8541 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8542 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
8543 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8544 ADVERTISED_FIBRE);
a2fbb9ea
ET
8545 } else {
8546 BNX2X_ERR("NVRAM config error. "
8547 "Invalid link_config 0x%x"
8548 " speed_cap_mask 0x%x\n",
34f80b04 8549 bp->port.link_config,
c18487ee 8550 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8551 return;
8552 }
8553 break;
8554
8555 default:
8556 BNX2X_ERR("NVRAM config error. "
8557 "BAD link speed link_config 0x%x\n",
34f80b04 8558 bp->port.link_config);
c18487ee 8559 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8560 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
8561 break;
8562 }
a2fbb9ea 8563
34f80b04
EG
8564 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8565 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8566 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8567 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8568 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8569
c18487ee 8570 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8571 " advertising 0x%x\n",
c18487ee
YR
8572 bp->link_params.req_line_speed,
8573 bp->link_params.req_duplex,
34f80b04 8574 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
8575}
8576
e665bfda
MC
8577static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8578{
8579 mac_hi = cpu_to_be16(mac_hi);
8580 mac_lo = cpu_to_be32(mac_lo);
8581 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8582 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8583}
8584
34f80b04 8585static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8586{
34f80b04
EG
8587 int port = BP_PORT(bp);
8588 u32 val, val2;
589abe3a 8589 u32 config;
c2c8b03e 8590 u16 i;
01cd4528 8591 u32 ext_phy_type;
a2fbb9ea 8592
c18487ee 8593 bp->link_params.bp = bp;
34f80b04 8594 bp->link_params.port = port;
c18487ee 8595
c18487ee 8596 bp->link_params.lane_config =
a2fbb9ea 8597 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8598 bp->link_params.ext_phy_config =
a2fbb9ea
ET
8599 SHMEM_RD(bp,
8600 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
8601 /* BCM8727_NOC => BCM8727 no over current */
8602 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8603 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8604 bp->link_params.ext_phy_config &=
8605 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8606 bp->link_params.ext_phy_config |=
8607 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8608 bp->link_params.feature_config_flags |=
8609 FEATURE_CONFIG_BCM8727_NOC;
8610 }
8611
c18487ee 8612 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
8613 SHMEM_RD(bp,
8614 dev_info.port_hw_config[port].speed_capability_mask);
8615
34f80b04 8616 bp->port.link_config =
a2fbb9ea
ET
8617 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8618
c2c8b03e
EG
8619 /* Get the 4 lanes xgxs config rx and tx */
8620 for (i = 0; i < 2; i++) {
8621 val = SHMEM_RD(bp,
8622 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8623 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8624 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8625
8626 val = SHMEM_RD(bp,
8627 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8628 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8629 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8630 }
8631
3ce2c3f9
EG
8632 /* If the device is capable of WoL, set the default state according
8633 * to the HW
8634 */
4d295db0 8635 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8636 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8637 (config & PORT_FEATURE_WOL_ENABLED));
8638
c2c8b03e
EG
8639 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8640 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
8641 bp->link_params.lane_config,
8642 bp->link_params.ext_phy_config,
34f80b04 8643 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8644
4d295db0
EG
8645 bp->link_params.switch_cfg |= (bp->port.link_config &
8646 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8647 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8648
8649 bnx2x_link_settings_requested(bp);
8650
01cd4528
EG
8651 /*
8652 * If connected directly, work with the internal PHY, otherwise, work
8653 * with the external PHY
8654 */
8655 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8656 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8657 bp->mdio.prtad = bp->link_params.phy_addr;
8658
8659 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8660 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8661 bp->mdio.prtad =
659bc5c4 8662 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 8663
a2fbb9ea
ET
8664 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8665 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 8666 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
8667 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8668 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
8669}
8670
8671static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8672{
8673 int func = BP_FUNC(bp);
8674 u32 val, val2;
8675 int rc = 0;
a2fbb9ea 8676
34f80b04 8677 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8678
34f80b04
EG
8679 bp->e1hov = 0;
8680 bp->e1hmf = 0;
8681 if (CHIP_IS_E1H(bp)) {
8682 bp->mf_config =
8683 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8684
2691d51d 8685 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 8686 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 8687 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 8688 bp->e1hmf = 1;
2691d51d
EG
8689 BNX2X_DEV_INFO("%s function mode\n",
8690 IS_E1HMF(bp) ? "multi" : "single");
8691
8692 if (IS_E1HMF(bp)) {
8693 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8694 e1hov_tag) &
8695 FUNC_MF_CFG_E1HOV_TAG_MASK);
8696 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8697 bp->e1hov = val;
8698 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8699 "(0x%04x)\n",
8700 func, bp->e1hov, bp->e1hov);
8701 } else {
34f80b04
EG
8702 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8703 " aborting\n", func);
8704 rc = -EPERM;
8705 }
2691d51d
EG
8706 } else {
8707 if (BP_E1HVN(bp)) {
8708 BNX2X_ERR("!!! VN %d in single function mode,"
8709 " aborting\n", BP_E1HVN(bp));
8710 rc = -EPERM;
8711 }
34f80b04
EG
8712 }
8713 }
a2fbb9ea 8714
34f80b04
EG
8715 if (!BP_NOMCP(bp)) {
8716 bnx2x_get_port_hwinfo(bp);
8717
8718 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8719 DRV_MSG_SEQ_NUMBER_MASK);
8720 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8721 }
8722
8723 if (IS_E1HMF(bp)) {
8724 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8725 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8726 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8727 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8728 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8729 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8730 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8731 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8732 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8733 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8734 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8735 ETH_ALEN);
8736 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8737 ETH_ALEN);
a2fbb9ea 8738 }
34f80b04
EG
8739
8740 return rc;
a2fbb9ea
ET
8741 }
8742
34f80b04
EG
8743 if (BP_NOMCP(bp)) {
8744 /* only supposed to happen on emulation/FPGA */
33471629 8745 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
8746 random_ether_addr(bp->dev->dev_addr);
8747 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8748 }
a2fbb9ea 8749
34f80b04
EG
8750 return rc;
8751}
8752
8753static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8754{
8755 int func = BP_FUNC(bp);
87942b46 8756 int timer_interval;
34f80b04
EG
8757 int rc;
8758
da5a662a
VZ
8759 /* Disable interrupt handling until HW is initialized */
8760 atomic_set(&bp->intr_sem, 1);
e1510706 8761 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8762
34f80b04 8763 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 8764
1cf167f2 8765 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
8766 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8767
8768 rc = bnx2x_get_hwinfo(bp);
8769
8770 /* need to reset chip if undi was active */
8771 if (!BP_NOMCP(bp))
8772 bnx2x_undi_unload(bp);
8773
8774 if (CHIP_REV_IS_FPGA(bp))
8775 printk(KERN_ERR PFX "FPGA detected\n");
8776
8777 if (BP_NOMCP(bp) && (func == 0))
8778 printk(KERN_ERR PFX
8779 "MCP disabled, must load devices in order!\n");
8780
555f6c78 8781 /* Set multi queue mode */
8badd27a
EG
8782 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8783 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8784 printk(KERN_ERR PFX
8badd27a 8785 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
8786 multi_mode = ETH_RSS_MODE_DISABLED;
8787 }
8788 bp->multi_mode = multi_mode;
8789
8790
7a9b2557
VZ
8791 /* Set TPA flags */
8792 if (disable_tpa) {
8793 bp->flags &= ~TPA_ENABLE_FLAG;
8794 bp->dev->features &= ~NETIF_F_LRO;
8795 } else {
8796 bp->flags |= TPA_ENABLE_FLAG;
8797 bp->dev->features |= NETIF_F_LRO;
8798 }
8799
a18f5128
EG
8800 if (CHIP_IS_E1(bp))
8801 bp->dropless_fc = 0;
8802 else
8803 bp->dropless_fc = dropless_fc;
8804
8d5726c4 8805 bp->mrrs = mrrs;
7a9b2557 8806
34f80b04
EG
8807 bp->tx_ring_size = MAX_TX_AVAIL;
8808 bp->rx_ring_size = MAX_RX_AVAIL;
8809
8810 bp->rx_csum = 1;
34f80b04
EG
8811
8812 bp->tx_ticks = 50;
8813 bp->rx_ticks = 25;
8814
87942b46
EG
8815 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8816 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8817
8818 init_timer(&bp->timer);
8819 bp->timer.expires = jiffies + bp->current_interval;
8820 bp->timer.data = (unsigned long) bp;
8821 bp->timer.function = bnx2x_timer;
8822
8823 return rc;
a2fbb9ea
ET
8824}
8825
8826/*
8827 * ethtool service functions
8828 */
8829
8830/* All ethtool functions called with rtnl_lock */
8831
8832static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8833{
8834 struct bnx2x *bp = netdev_priv(dev);
8835
34f80b04
EG
8836 cmd->supported = bp->port.supported;
8837 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
8838
8839 if (netif_carrier_ok(dev)) {
c18487ee
YR
8840 cmd->speed = bp->link_vars.line_speed;
8841 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8842 } else {
c18487ee
YR
8843 cmd->speed = bp->link_params.req_line_speed;
8844 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8845 }
34f80b04
EG
8846 if (IS_E1HMF(bp)) {
8847 u16 vn_max_rate;
8848
8849 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8850 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8851 if (vn_max_rate < cmd->speed)
8852 cmd->speed = vn_max_rate;
8853 }
a2fbb9ea 8854
c18487ee
YR
8855 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8856 u32 ext_phy_type =
8857 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
8858
8859 switch (ext_phy_type) {
8860 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 8861 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8862 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
8863 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8864 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8865 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 8866 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
8867 cmd->port = PORT_FIBRE;
8868 break;
8869
8870 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 8871 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
8872 cmd->port = PORT_TP;
8873 break;
8874
c18487ee
YR
8875 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8876 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8877 bp->link_params.ext_phy_config);
8878 break;
8879
f1410647
ET
8880 default:
8881 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
8882 bp->link_params.ext_phy_config);
8883 break;
f1410647
ET
8884 }
8885 } else
a2fbb9ea 8886 cmd->port = PORT_TP;
a2fbb9ea 8887
01cd4528 8888 cmd->phy_address = bp->mdio.prtad;
a2fbb9ea
ET
8889 cmd->transceiver = XCVR_INTERNAL;
8890
c18487ee 8891 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8892 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8893 else
a2fbb9ea 8894 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
8895
8896 cmd->maxtxpkt = 0;
8897 cmd->maxrxpkt = 0;
8898
8899 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8900 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8901 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8902 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8903 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8904 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8905 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8906
8907 return 0;
8908}
8909
8910static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8911{
8912 struct bnx2x *bp = netdev_priv(dev);
8913 u32 advertising;
8914
34f80b04
EG
8915 if (IS_E1HMF(bp))
8916 return 0;
8917
a2fbb9ea
ET
8918 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8919 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8920 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8921 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8922 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8923 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8924 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8925
a2fbb9ea 8926 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8927 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8928 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8929 return -EINVAL;
f1410647 8930 }
a2fbb9ea
ET
8931
8932 /* advertise the requested speed and duplex if supported */
34f80b04 8933 cmd->advertising &= bp->port.supported;
a2fbb9ea 8934
c18487ee
YR
8935 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8936 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8937 bp->port.advertising |= (ADVERTISED_Autoneg |
8938 cmd->advertising);
a2fbb9ea
ET
8939
8940 } else { /* forced speed */
8941 /* advertise the requested speed and duplex if supported */
8942 switch (cmd->speed) {
8943 case SPEED_10:
8944 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8945 if (!(bp->port.supported &
f1410647
ET
8946 SUPPORTED_10baseT_Full)) {
8947 DP(NETIF_MSG_LINK,
8948 "10M full not supported\n");
a2fbb9ea 8949 return -EINVAL;
f1410647 8950 }
a2fbb9ea
ET
8951
8952 advertising = (ADVERTISED_10baseT_Full |
8953 ADVERTISED_TP);
8954 } else {
34f80b04 8955 if (!(bp->port.supported &
f1410647
ET
8956 SUPPORTED_10baseT_Half)) {
8957 DP(NETIF_MSG_LINK,
8958 "10M half not supported\n");
a2fbb9ea 8959 return -EINVAL;
f1410647 8960 }
a2fbb9ea
ET
8961
8962 advertising = (ADVERTISED_10baseT_Half |
8963 ADVERTISED_TP);
8964 }
8965 break;
8966
8967 case SPEED_100:
8968 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8969 if (!(bp->port.supported &
f1410647
ET
8970 SUPPORTED_100baseT_Full)) {
8971 DP(NETIF_MSG_LINK,
8972 "100M full not supported\n");
a2fbb9ea 8973 return -EINVAL;
f1410647 8974 }
a2fbb9ea
ET
8975
8976 advertising = (ADVERTISED_100baseT_Full |
8977 ADVERTISED_TP);
8978 } else {
34f80b04 8979 if (!(bp->port.supported &
f1410647
ET
8980 SUPPORTED_100baseT_Half)) {
8981 DP(NETIF_MSG_LINK,
8982 "100M half not supported\n");
a2fbb9ea 8983 return -EINVAL;
f1410647 8984 }
a2fbb9ea
ET
8985
8986 advertising = (ADVERTISED_100baseT_Half |
8987 ADVERTISED_TP);
8988 }
8989 break;
8990
8991 case SPEED_1000:
f1410647
ET
8992 if (cmd->duplex != DUPLEX_FULL) {
8993 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8994 return -EINVAL;
f1410647 8995 }
a2fbb9ea 8996
34f80b04 8997 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8998 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8999 return -EINVAL;
f1410647 9000 }
a2fbb9ea
ET
9001
9002 advertising = (ADVERTISED_1000baseT_Full |
9003 ADVERTISED_TP);
9004 break;
9005
9006 case SPEED_2500:
f1410647
ET
9007 if (cmd->duplex != DUPLEX_FULL) {
9008 DP(NETIF_MSG_LINK,
9009 "2.5G half not supported\n");
a2fbb9ea 9010 return -EINVAL;
f1410647 9011 }
a2fbb9ea 9012
34f80b04 9013 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
9014 DP(NETIF_MSG_LINK,
9015 "2.5G full not supported\n");
a2fbb9ea 9016 return -EINVAL;
f1410647 9017 }
a2fbb9ea 9018
f1410647 9019 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
9020 ADVERTISED_TP);
9021 break;
9022
9023 case SPEED_10000:
f1410647
ET
9024 if (cmd->duplex != DUPLEX_FULL) {
9025 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 9026 return -EINVAL;
f1410647 9027 }
a2fbb9ea 9028
34f80b04 9029 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 9030 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 9031 return -EINVAL;
f1410647 9032 }
a2fbb9ea
ET
9033
9034 advertising = (ADVERTISED_10000baseT_Full |
9035 ADVERTISED_FIBRE);
9036 break;
9037
9038 default:
f1410647 9039 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
9040 return -EINVAL;
9041 }
9042
c18487ee
YR
9043 bp->link_params.req_line_speed = cmd->speed;
9044 bp->link_params.req_duplex = cmd->duplex;
34f80b04 9045 bp->port.advertising = advertising;
a2fbb9ea
ET
9046 }
9047
c18487ee 9048 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 9049 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 9050 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 9051 bp->port.advertising);
a2fbb9ea 9052
34f80b04 9053 if (netif_running(dev)) {
bb2a0f7a 9054 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9055 bnx2x_link_set(bp);
9056 }
a2fbb9ea
ET
9057
9058 return 0;
9059}
9060
0a64ea57
EG
9061#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9062#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9063
9064static int bnx2x_get_regs_len(struct net_device *dev)
9065{
0a64ea57 9066 struct bnx2x *bp = netdev_priv(dev);
0d28e49a 9067 int regdump_len = 0;
0a64ea57
EG
9068 int i;
9069
0a64ea57
EG
9070 if (CHIP_IS_E1(bp)) {
9071 for (i = 0; i < REGS_COUNT; i++)
9072 if (IS_E1_ONLINE(reg_addrs[i].info))
9073 regdump_len += reg_addrs[i].size;
9074
9075 for (i = 0; i < WREGS_COUNT_E1; i++)
9076 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9077 regdump_len += wreg_addrs_e1[i].size *
9078 (1 + wreg_addrs_e1[i].read_regs_count);
9079
9080 } else { /* E1H */
9081 for (i = 0; i < REGS_COUNT; i++)
9082 if (IS_E1H_ONLINE(reg_addrs[i].info))
9083 regdump_len += reg_addrs[i].size;
9084
9085 for (i = 0; i < WREGS_COUNT_E1H; i++)
9086 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9087 regdump_len += wreg_addrs_e1h[i].size *
9088 (1 + wreg_addrs_e1h[i].read_regs_count);
9089 }
9090 regdump_len *= 4;
9091 regdump_len += sizeof(struct dump_hdr);
9092
9093 return regdump_len;
9094}
9095
9096static void bnx2x_get_regs(struct net_device *dev,
9097 struct ethtool_regs *regs, void *_p)
9098{
9099 u32 *p = _p, i, j;
9100 struct bnx2x *bp = netdev_priv(dev);
9101 struct dump_hdr dump_hdr = {0};
9102
9103 regs->version = 0;
9104 memset(p, 0, regs->len);
9105
9106 if (!netif_running(bp->dev))
9107 return;
9108
9109 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9110 dump_hdr.dump_sign = dump_sign_all;
9111 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9112 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9113 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9114 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9115 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9116
9117 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9118 p += dump_hdr.hdr_size + 1;
9119
9120 if (CHIP_IS_E1(bp)) {
9121 for (i = 0; i < REGS_COUNT; i++)
9122 if (IS_E1_ONLINE(reg_addrs[i].info))
9123 for (j = 0; j < reg_addrs[i].size; j++)
9124 *p++ = REG_RD(bp,
9125 reg_addrs[i].addr + j*4);
9126
9127 } else { /* E1H */
9128 for (i = 0; i < REGS_COUNT; i++)
9129 if (IS_E1H_ONLINE(reg_addrs[i].info))
9130 for (j = 0; j < reg_addrs[i].size; j++)
9131 *p++ = REG_RD(bp,
9132 reg_addrs[i].addr + j*4);
9133 }
9134}
9135
0d28e49a
EG
9136#define PHY_FW_VER_LEN 10
9137
9138static void bnx2x_get_drvinfo(struct net_device *dev,
9139 struct ethtool_drvinfo *info)
9140{
9141 struct bnx2x *bp = netdev_priv(dev);
9142 u8 phy_fw_ver[PHY_FW_VER_LEN];
9143
9144 strcpy(info->driver, DRV_MODULE_NAME);
9145 strcpy(info->version, DRV_MODULE_VERSION);
9146
9147 phy_fw_ver[0] = '\0';
9148 if (bp->port.pmf) {
9149 bnx2x_acquire_phy_lock(bp);
9150 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9151 (bp->state != BNX2X_STATE_CLOSED),
9152 phy_fw_ver, PHY_FW_VER_LEN);
9153 bnx2x_release_phy_lock(bp);
9154 }
9155
9156 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9157 (bp->common.bc_ver & 0xff0000) >> 16,
9158 (bp->common.bc_ver & 0xff00) >> 8,
9159 (bp->common.bc_ver & 0xff),
9160 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9161 strcpy(info->bus_info, pci_name(bp->pdev));
9162 info->n_stats = BNX2X_NUM_STATS;
9163 info->testinfo_len = BNX2X_NUM_TESTS;
9164 info->eedump_len = bp->common.flash_size;
9165 info->regdump_len = bnx2x_get_regs_len(dev);
9166}
9167
a2fbb9ea
ET
9168static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9169{
9170 struct bnx2x *bp = netdev_priv(dev);
9171
9172 if (bp->flags & NO_WOL_FLAG) {
9173 wol->supported = 0;
9174 wol->wolopts = 0;
9175 } else {
9176 wol->supported = WAKE_MAGIC;
9177 if (bp->wol)
9178 wol->wolopts = WAKE_MAGIC;
9179 else
9180 wol->wolopts = 0;
9181 }
9182 memset(&wol->sopass, 0, sizeof(wol->sopass));
9183}
9184
9185static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9186{
9187 struct bnx2x *bp = netdev_priv(dev);
9188
9189 if (wol->wolopts & ~WAKE_MAGIC)
9190 return -EINVAL;
9191
9192 if (wol->wolopts & WAKE_MAGIC) {
9193 if (bp->flags & NO_WOL_FLAG)
9194 return -EINVAL;
9195
9196 bp->wol = 1;
34f80b04 9197 } else
a2fbb9ea 9198 bp->wol = 0;
34f80b04 9199
a2fbb9ea
ET
9200 return 0;
9201}
9202
9203static u32 bnx2x_get_msglevel(struct net_device *dev)
9204{
9205 struct bnx2x *bp = netdev_priv(dev);
9206
9207 return bp->msglevel;
9208}
9209
9210static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9211{
9212 struct bnx2x *bp = netdev_priv(dev);
9213
9214 if (capable(CAP_NET_ADMIN))
9215 bp->msglevel = level;
9216}
9217
9218static int bnx2x_nway_reset(struct net_device *dev)
9219{
9220 struct bnx2x *bp = netdev_priv(dev);
9221
34f80b04
EG
9222 if (!bp->port.pmf)
9223 return 0;
a2fbb9ea 9224
34f80b04 9225 if (netif_running(dev)) {
bb2a0f7a 9226 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9227 bnx2x_link_set(bp);
9228 }
a2fbb9ea
ET
9229
9230 return 0;
9231}
9232
ab6ad5a4 9233static u32 bnx2x_get_link(struct net_device *dev)
01e53298
NO
9234{
9235 struct bnx2x *bp = netdev_priv(dev);
9236
9237 return bp->link_vars.link_up;
9238}
9239
a2fbb9ea
ET
9240static int bnx2x_get_eeprom_len(struct net_device *dev)
9241{
9242 struct bnx2x *bp = netdev_priv(dev);
9243
34f80b04 9244 return bp->common.flash_size;
a2fbb9ea
ET
9245}
9246
9247static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9248{
34f80b04 9249 int port = BP_PORT(bp);
a2fbb9ea
ET
9250 int count, i;
9251 u32 val = 0;
9252
9253 /* adjust timeout for emulation/FPGA */
9254 count = NVRAM_TIMEOUT_COUNT;
9255 if (CHIP_REV_IS_SLOW(bp))
9256 count *= 100;
9257
9258 /* request access to nvram interface */
9259 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9260 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9261
9262 for (i = 0; i < count*10; i++) {
9263 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9264 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9265 break;
9266
9267 udelay(5);
9268 }
9269
9270 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 9271 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
9272 return -EBUSY;
9273 }
9274
9275 return 0;
9276}
9277
9278static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9279{
34f80b04 9280 int port = BP_PORT(bp);
a2fbb9ea
ET
9281 int count, i;
9282 u32 val = 0;
9283
9284 /* adjust timeout for emulation/FPGA */
9285 count = NVRAM_TIMEOUT_COUNT;
9286 if (CHIP_REV_IS_SLOW(bp))
9287 count *= 100;
9288
9289 /* relinquish nvram interface */
9290 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9291 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9292
9293 for (i = 0; i < count*10; i++) {
9294 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9295 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9296 break;
9297
9298 udelay(5);
9299 }
9300
9301 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 9302 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
9303 return -EBUSY;
9304 }
9305
9306 return 0;
9307}
9308
9309static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9310{
9311 u32 val;
9312
9313 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9314
9315 /* enable both bits, even on read */
9316 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9317 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9318 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9319}
9320
9321static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9322{
9323 u32 val;
9324
9325 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9326
9327 /* disable both bits, even after read */
9328 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9329 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9330 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9331}
9332
4781bfad 9333static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
9334 u32 cmd_flags)
9335{
f1410647 9336 int count, i, rc;
a2fbb9ea
ET
9337 u32 val;
9338
9339 /* build the command word */
9340 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9341
9342 /* need to clear DONE bit separately */
9343 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9344
9345 /* address of the NVRAM to read from */
9346 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9347 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9348
9349 /* issue a read command */
9350 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9351
9352 /* adjust timeout for emulation/FPGA */
9353 count = NVRAM_TIMEOUT_COUNT;
9354 if (CHIP_REV_IS_SLOW(bp))
9355 count *= 100;
9356
9357 /* wait for completion */
9358 *ret_val = 0;
9359 rc = -EBUSY;
9360 for (i = 0; i < count; i++) {
9361 udelay(5);
9362 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9363
9364 if (val & MCPR_NVM_COMMAND_DONE) {
9365 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
9366 /* we read nvram data in cpu order
9367 * but ethtool sees it as an array of bytes
9368 * converting to big-endian will do the work */
4781bfad 9369 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
9370 rc = 0;
9371 break;
9372 }
9373 }
9374
9375 return rc;
9376}
9377
9378static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9379 int buf_size)
9380{
9381 int rc;
9382 u32 cmd_flags;
4781bfad 9383 __be32 val;
a2fbb9ea
ET
9384
9385 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9386 DP(BNX2X_MSG_NVM,
c14423fe 9387 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9388 offset, buf_size);
9389 return -EINVAL;
9390 }
9391
34f80b04
EG
9392 if (offset + buf_size > bp->common.flash_size) {
9393 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9394 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9395 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9396 return -EINVAL;
9397 }
9398
9399 /* request access to nvram interface */
9400 rc = bnx2x_acquire_nvram_lock(bp);
9401 if (rc)
9402 return rc;
9403
9404 /* enable access to nvram interface */
9405 bnx2x_enable_nvram_access(bp);
9406
9407 /* read the first word(s) */
9408 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9409 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9410 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9411 memcpy(ret_buf, &val, 4);
9412
9413 /* advance to the next dword */
9414 offset += sizeof(u32);
9415 ret_buf += sizeof(u32);
9416 buf_size -= sizeof(u32);
9417 cmd_flags = 0;
9418 }
9419
9420 if (rc == 0) {
9421 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9422 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9423 memcpy(ret_buf, &val, 4);
9424 }
9425
9426 /* disable access to nvram interface */
9427 bnx2x_disable_nvram_access(bp);
9428 bnx2x_release_nvram_lock(bp);
9429
9430 return rc;
9431}
9432
9433static int bnx2x_get_eeprom(struct net_device *dev,
9434 struct ethtool_eeprom *eeprom, u8 *eebuf)
9435{
9436 struct bnx2x *bp = netdev_priv(dev);
9437 int rc;
9438
2add3acb
EG
9439 if (!netif_running(dev))
9440 return -EAGAIN;
9441
34f80b04 9442 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9443 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9444 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9445 eeprom->len, eeprom->len);
9446
9447 /* parameters already validated in ethtool_get_eeprom */
9448
9449 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9450
9451 return rc;
9452}
9453
9454static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9455 u32 cmd_flags)
9456{
f1410647 9457 int count, i, rc;
a2fbb9ea
ET
9458
9459 /* build the command word */
9460 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9461
9462 /* need to clear DONE bit separately */
9463 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9464
9465 /* write the data */
9466 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9467
9468 /* address of the NVRAM to write to */
9469 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9470 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9471
9472 /* issue the write command */
9473 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9474
9475 /* adjust timeout for emulation/FPGA */
9476 count = NVRAM_TIMEOUT_COUNT;
9477 if (CHIP_REV_IS_SLOW(bp))
9478 count *= 100;
9479
9480 /* wait for completion */
9481 rc = -EBUSY;
9482 for (i = 0; i < count; i++) {
9483 udelay(5);
9484 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9485 if (val & MCPR_NVM_COMMAND_DONE) {
9486 rc = 0;
9487 break;
9488 }
9489 }
9490
9491 return rc;
9492}
9493
f1410647 9494#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
9495
9496static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9497 int buf_size)
9498{
9499 int rc;
9500 u32 cmd_flags;
9501 u32 align_offset;
4781bfad 9502 __be32 val;
a2fbb9ea 9503
34f80b04
EG
9504 if (offset + buf_size > bp->common.flash_size) {
9505 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9506 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9507 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9508 return -EINVAL;
9509 }
9510
9511 /* request access to nvram interface */
9512 rc = bnx2x_acquire_nvram_lock(bp);
9513 if (rc)
9514 return rc;
9515
9516 /* enable access to nvram interface */
9517 bnx2x_enable_nvram_access(bp);
9518
9519 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9520 align_offset = (offset & ~0x03);
9521 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9522
9523 if (rc == 0) {
9524 val &= ~(0xff << BYTE_OFFSET(offset));
9525 val |= (*data_buf << BYTE_OFFSET(offset));
9526
9527 /* nvram data is returned as an array of bytes
9528 * convert it back to cpu order */
9529 val = be32_to_cpu(val);
9530
a2fbb9ea
ET
9531 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9532 cmd_flags);
9533 }
9534
9535 /* disable access to nvram interface */
9536 bnx2x_disable_nvram_access(bp);
9537 bnx2x_release_nvram_lock(bp);
9538
9539 return rc;
9540}
9541
9542static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9543 int buf_size)
9544{
9545 int rc;
9546 u32 cmd_flags;
9547 u32 val;
9548 u32 written_so_far;
9549
34f80b04 9550 if (buf_size == 1) /* ethtool */
a2fbb9ea 9551 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
9552
9553 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9554 DP(BNX2X_MSG_NVM,
c14423fe 9555 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9556 offset, buf_size);
9557 return -EINVAL;
9558 }
9559
34f80b04
EG
9560 if (offset + buf_size > bp->common.flash_size) {
9561 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9562 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9563 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9564 return -EINVAL;
9565 }
9566
9567 /* request access to nvram interface */
9568 rc = bnx2x_acquire_nvram_lock(bp);
9569 if (rc)
9570 return rc;
9571
9572 /* enable access to nvram interface */
9573 bnx2x_enable_nvram_access(bp);
9574
9575 written_so_far = 0;
9576 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9577 while ((written_so_far < buf_size) && (rc == 0)) {
9578 if (written_so_far == (buf_size - sizeof(u32)))
9579 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9580 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9581 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9582 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9583 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9584
9585 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
9586
9587 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9588
9589 /* advance to the next dword */
9590 offset += sizeof(u32);
9591 data_buf += sizeof(u32);
9592 written_so_far += sizeof(u32);
9593 cmd_flags = 0;
9594 }
9595
9596 /* disable access to nvram interface */
9597 bnx2x_disable_nvram_access(bp);
9598 bnx2x_release_nvram_lock(bp);
9599
9600 return rc;
9601}
9602
9603static int bnx2x_set_eeprom(struct net_device *dev,
9604 struct ethtool_eeprom *eeprom, u8 *eebuf)
9605{
9606 struct bnx2x *bp = netdev_priv(dev);
f57a6025
EG
9607 int port = BP_PORT(bp);
9608 int rc = 0;
a2fbb9ea 9609
9f4c9583
EG
9610 if (!netif_running(dev))
9611 return -EAGAIN;
9612
34f80b04 9613 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9614 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9615 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9616 eeprom->len, eeprom->len);
9617
9618 /* parameters already validated in ethtool_set_eeprom */
9619
f57a6025
EG
9620 /* PHY eeprom can be accessed only by the PMF */
9621 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9622 !bp->port.pmf)
9623 return -EINVAL;
9624
9625 if (eeprom->magic == 0x50485950) {
9626 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9627 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 9628
f57a6025
EG
9629 bnx2x_acquire_phy_lock(bp);
9630 rc |= bnx2x_link_reset(&bp->link_params,
9631 &bp->link_vars, 0);
9632 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9633 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9634 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9635 MISC_REGISTERS_GPIO_HIGH, port);
9636 bnx2x_release_phy_lock(bp);
9637 bnx2x_link_report(bp);
9638
9639 } else if (eeprom->magic == 0x50485952) {
9640 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9641 if ((bp->state == BNX2X_STATE_OPEN) ||
9642 (bp->state == BNX2X_STATE_DISABLED)) {
4a37fb66 9643 bnx2x_acquire_phy_lock(bp);
f57a6025
EG
9644 rc |= bnx2x_link_reset(&bp->link_params,
9645 &bp->link_vars, 1);
9646
9647 rc |= bnx2x_phy_init(&bp->link_params,
9648 &bp->link_vars);
4a37fb66 9649 bnx2x_release_phy_lock(bp);
f57a6025
EG
9650 bnx2x_calc_fc_adv(bp);
9651 }
9652 } else if (eeprom->magic == 0x53985943) {
9653 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9654 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9655 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9656 u8 ext_phy_addr =
659bc5c4 9657 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
f57a6025
EG
9658
9659 /* DSP Remove Download Mode */
9660 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9661 MISC_REGISTERS_GPIO_LOW, port);
34f80b04 9662
f57a6025
EG
9663 bnx2x_acquire_phy_lock(bp);
9664
9665 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9666
9667 /* wait 0.5 sec to allow it to run */
9668 msleep(500);
9669 bnx2x_ext_phy_hw_reset(bp, port);
9670 msleep(500);
9671 bnx2x_release_phy_lock(bp);
9672 }
9673 } else
c18487ee 9674 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
9675
9676 return rc;
9677}
9678
9679static int bnx2x_get_coalesce(struct net_device *dev,
9680 struct ethtool_coalesce *coal)
9681{
9682 struct bnx2x *bp = netdev_priv(dev);
9683
9684 memset(coal, 0, sizeof(struct ethtool_coalesce));
9685
9686 coal->rx_coalesce_usecs = bp->rx_ticks;
9687 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
9688
9689 return 0;
9690}
9691
ca00392c 9692#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
a2fbb9ea
ET
9693static int bnx2x_set_coalesce(struct net_device *dev,
9694 struct ethtool_coalesce *coal)
9695{
9696 struct bnx2x *bp = netdev_priv(dev);
9697
9698 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
ca00392c
EG
9699 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9700 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea
ET
9701
9702 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
ca00392c
EG
9703 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9704 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea 9705
34f80b04 9706 if (netif_running(dev))
a2fbb9ea
ET
9707 bnx2x_update_coalesce(bp);
9708
9709 return 0;
9710}
9711
9712static void bnx2x_get_ringparam(struct net_device *dev,
9713 struct ethtool_ringparam *ering)
9714{
9715 struct bnx2x *bp = netdev_priv(dev);
9716
9717 ering->rx_max_pending = MAX_RX_AVAIL;
9718 ering->rx_mini_max_pending = 0;
9719 ering->rx_jumbo_max_pending = 0;
9720
9721 ering->rx_pending = bp->rx_ring_size;
9722 ering->rx_mini_pending = 0;
9723 ering->rx_jumbo_pending = 0;
9724
9725 ering->tx_max_pending = MAX_TX_AVAIL;
9726 ering->tx_pending = bp->tx_ring_size;
9727}
9728
9729static int bnx2x_set_ringparam(struct net_device *dev,
9730 struct ethtool_ringparam *ering)
9731{
9732 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9733 int rc = 0;
a2fbb9ea
ET
9734
9735 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9736 (ering->tx_pending > MAX_TX_AVAIL) ||
9737 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9738 return -EINVAL;
9739
9740 bp->rx_ring_size = ering->rx_pending;
9741 bp->tx_ring_size = ering->tx_pending;
9742
34f80b04
EG
9743 if (netif_running(dev)) {
9744 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9745 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
9746 }
9747
34f80b04 9748 return rc;
a2fbb9ea
ET
9749}
9750
9751static void bnx2x_get_pauseparam(struct net_device *dev,
9752 struct ethtool_pauseparam *epause)
9753{
9754 struct bnx2x *bp = netdev_priv(dev);
9755
356e2385
EG
9756 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9757 BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
9758 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9759
c0700f90
DM
9760 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9761 BNX2X_FLOW_CTRL_RX);
9762 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9763 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
9764
9765 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9766 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9767 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9768}
9769
9770static int bnx2x_set_pauseparam(struct net_device *dev,
9771 struct ethtool_pauseparam *epause)
9772{
9773 struct bnx2x *bp = netdev_priv(dev);
9774
34f80b04
EG
9775 if (IS_E1HMF(bp))
9776 return 0;
9777
a2fbb9ea
ET
9778 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9779 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9780 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9781
c0700f90 9782 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9783
f1410647 9784 if (epause->rx_pause)
c0700f90 9785 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9786
f1410647 9787 if (epause->tx_pause)
c0700f90 9788 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9789
c0700f90
DM
9790 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9791 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9792
c18487ee 9793 if (epause->autoneg) {
34f80b04 9794 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9795 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
9796 return -EINVAL;
9797 }
a2fbb9ea 9798
c18487ee 9799 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9800 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9801 }
a2fbb9ea 9802
c18487ee
YR
9803 DP(NETIF_MSG_LINK,
9804 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
9805
9806 if (netif_running(dev)) {
bb2a0f7a 9807 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9808 bnx2x_link_set(bp);
9809 }
a2fbb9ea
ET
9810
9811 return 0;
9812}
9813
df0f2343
VZ
9814static int bnx2x_set_flags(struct net_device *dev, u32 data)
9815{
9816 struct bnx2x *bp = netdev_priv(dev);
9817 int changed = 0;
9818 int rc = 0;
9819
9820 /* TPA requires Rx CSUM offloading */
9821 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9822 if (!(dev->features & NETIF_F_LRO)) {
9823 dev->features |= NETIF_F_LRO;
9824 bp->flags |= TPA_ENABLE_FLAG;
9825 changed = 1;
9826 }
9827
9828 } else if (dev->features & NETIF_F_LRO) {
9829 dev->features &= ~NETIF_F_LRO;
9830 bp->flags &= ~TPA_ENABLE_FLAG;
9831 changed = 1;
9832 }
9833
9834 if (changed && netif_running(dev)) {
9835 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9836 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9837 }
9838
9839 return rc;
9840}
9841
a2fbb9ea
ET
9842static u32 bnx2x_get_rx_csum(struct net_device *dev)
9843{
9844 struct bnx2x *bp = netdev_priv(dev);
9845
9846 return bp->rx_csum;
9847}
9848
9849static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9850{
9851 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9852 int rc = 0;
a2fbb9ea
ET
9853
9854 bp->rx_csum = data;
df0f2343
VZ
9855
9856 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9857 TPA'ed packets will be discarded due to wrong TCP CSUM */
9858 if (!data) {
9859 u32 flags = ethtool_op_get_flags(dev);
9860
9861 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9862 }
9863
9864 return rc;
a2fbb9ea
ET
9865}
9866
9867static int bnx2x_set_tso(struct net_device *dev, u32 data)
9868{
755735eb 9869 if (data) {
a2fbb9ea 9870 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9871 dev->features |= NETIF_F_TSO6;
9872 } else {
a2fbb9ea 9873 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9874 dev->features &= ~NETIF_F_TSO6;
9875 }
9876
a2fbb9ea
ET
9877 return 0;
9878}
9879
f3c87cdd 9880static const struct {
a2fbb9ea
ET
9881 char string[ETH_GSTRING_LEN];
9882} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
9883 { "register_test (offline)" },
9884 { "memory_test (offline)" },
9885 { "loopback_test (offline)" },
9886 { "nvram_test (online)" },
9887 { "interrupt_test (online)" },
9888 { "link_test (online)" },
d3d4f495 9889 { "idle check (online)" }
a2fbb9ea
ET
9890};
9891
f3c87cdd
YG
9892static int bnx2x_test_registers(struct bnx2x *bp)
9893{
9894 int idx, i, rc = -ENODEV;
9895 u32 wr_val = 0;
9dabc424 9896 int port = BP_PORT(bp);
f3c87cdd
YG
9897 static const struct {
9898 u32 offset0;
9899 u32 offset1;
9900 u32 mask;
9901 } reg_tbl[] = {
9902/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9903 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9904 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9905 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9906 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9907 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9908 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9909 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9910 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9911 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9912/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9913 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9914 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9915 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9916 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9917 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9918 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9919 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
f3c87cdd 9920 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
c1f1a06f
EG
9921 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9922/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
f3c87cdd
YG
9923 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9924 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9925 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9926 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9927 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9928 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9929 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9930 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
c1f1a06f
EG
9931 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9932/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
f3c87cdd
YG
9933 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9934 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9935 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9936 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9937 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9938 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9939
9940 { 0xffffffff, 0, 0x00000000 }
9941 };
9942
9943 if (!netif_running(bp->dev))
9944 return rc;
9945
9946 /* Repeat the test twice:
9947 First by writing 0x00000000, second by writing 0xffffffff */
9948 for (idx = 0; idx < 2; idx++) {
9949
9950 switch (idx) {
9951 case 0:
9952 wr_val = 0;
9953 break;
9954 case 1:
9955 wr_val = 0xffffffff;
9956 break;
9957 }
9958
9959 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9960 u32 offset, mask, save_val, val;
f3c87cdd
YG
9961
9962 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9963 mask = reg_tbl[i].mask;
9964
9965 save_val = REG_RD(bp, offset);
9966
9967 REG_WR(bp, offset, wr_val);
9968 val = REG_RD(bp, offset);
9969
9970 /* Restore the original register's value */
9971 REG_WR(bp, offset, save_val);
9972
9973 /* verify that value is as expected value */
9974 if ((val & mask) != (wr_val & mask))
9975 goto test_reg_exit;
9976 }
9977 }
9978
9979 rc = 0;
9980
9981test_reg_exit:
9982 return rc;
9983}
9984
9985static int bnx2x_test_memory(struct bnx2x *bp)
9986{
9987 int i, j, rc = -ENODEV;
9988 u32 val;
9989 static const struct {
9990 u32 offset;
9991 int size;
9992 } mem_tbl[] = {
9993 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9994 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9995 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9996 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9997 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9998 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9999 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10000
10001 { 0xffffffff, 0 }
10002 };
10003 static const struct {
10004 char *name;
10005 u32 offset;
9dabc424
YG
10006 u32 e1_mask;
10007 u32 e1h_mask;
f3c87cdd 10008 } prty_tbl[] = {
9dabc424
YG
10009 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10010 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10011 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10012 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10013 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10014 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
10015
10016 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
10017 };
10018
10019 if (!netif_running(bp->dev))
10020 return rc;
10021
10022 /* Go through all the memories */
10023 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10024 for (j = 0; j < mem_tbl[i].size; j++)
10025 REG_RD(bp, mem_tbl[i].offset + j*4);
10026
10027 /* Check the parity status */
10028 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10029 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
10030 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10031 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
10032 DP(NETIF_MSG_HW,
10033 "%s is 0x%x\n", prty_tbl[i].name, val);
10034 goto test_mem_exit;
10035 }
10036 }
10037
10038 rc = 0;
10039
10040test_mem_exit:
10041 return rc;
10042}
10043
f3c87cdd
YG
10044static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10045{
10046 int cnt = 1000;
10047
10048 if (link_up)
10049 while (bnx2x_link_test(bp) && cnt--)
10050 msleep(10);
10051}
10052
10053static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10054{
10055 unsigned int pkt_size, num_pkts, i;
10056 struct sk_buff *skb;
10057 unsigned char *packet;
ca00392c
EG
10058 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10059 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
f3c87cdd
YG
10060 u16 tx_start_idx, tx_idx;
10061 u16 rx_start_idx, rx_idx;
ca00392c 10062 u16 pkt_prod, bd_prod;
f3c87cdd 10063 struct sw_tx_bd *tx_buf;
ca00392c
EG
10064 struct eth_tx_start_bd *tx_start_bd;
10065 struct eth_tx_parse_bd *pbd = NULL;
f3c87cdd
YG
10066 dma_addr_t mapping;
10067 union eth_rx_cqe *cqe;
10068 u8 cqe_fp_flags;
10069 struct sw_rx_bd *rx_buf;
10070 u16 len;
10071 int rc = -ENODEV;
10072
b5bf9068
EG
10073 /* check the loopback mode */
10074 switch (loopback_mode) {
10075 case BNX2X_PHY_LOOPBACK:
10076 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10077 return -EINVAL;
10078 break;
10079 case BNX2X_MAC_LOOPBACK:
f3c87cdd 10080 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 10081 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
10082 break;
10083 default:
f3c87cdd 10084 return -EINVAL;
b5bf9068 10085 }
f3c87cdd 10086
b5bf9068
EG
10087 /* prepare the loopback packet */
10088 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10089 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
10090 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10091 if (!skb) {
10092 rc = -ENOMEM;
10093 goto test_loopback_exit;
10094 }
10095 packet = skb_put(skb, pkt_size);
10096 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
ca00392c
EG
10097 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10098 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
f3c87cdd
YG
10099 for (i = ETH_HLEN; i < pkt_size; i++)
10100 packet[i] = (unsigned char) (i & 0xff);
10101
b5bf9068 10102 /* send the loopback packet */
f3c87cdd 10103 num_pkts = 0;
ca00392c
EG
10104 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10105 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd 10106
ca00392c
EG
10107 pkt_prod = fp_tx->tx_pkt_prod++;
10108 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10109 tx_buf->first_bd = fp_tx->tx_bd_prod;
f3c87cdd 10110 tx_buf->skb = skb;
ca00392c 10111 tx_buf->flags = 0;
f3c87cdd 10112
ca00392c
EG
10113 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10114 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
f3c87cdd
YG
10115 mapping = pci_map_single(bp->pdev, skb->data,
10116 skb_headlen(skb), PCI_DMA_TODEVICE);
ca00392c
EG
10117 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10118 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10119 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10120 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10121 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10122 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10123 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10124 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10125
10126 /* turn on parsing and get a BD */
10127 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10128 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10129
10130 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
f3c87cdd 10131
58f4c4cf
EG
10132 wmb();
10133
ca00392c
EG
10134 fp_tx->tx_db.data.prod += 2;
10135 barrier();
10136 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
f3c87cdd
YG
10137
10138 mmiowb();
10139
10140 num_pkts++;
ca00392c 10141 fp_tx->tx_bd_prod += 2; /* start + pbd */
f3c87cdd
YG
10142 bp->dev->trans_start = jiffies;
10143
10144 udelay(100);
10145
ca00392c 10146 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
f3c87cdd
YG
10147 if (tx_idx != tx_start_idx + num_pkts)
10148 goto test_loopback_exit;
10149
ca00392c 10150 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd
YG
10151 if (rx_idx != rx_start_idx + num_pkts)
10152 goto test_loopback_exit;
10153
ca00392c 10154 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
f3c87cdd
YG
10155 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10156 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10157 goto test_loopback_rx_exit;
10158
10159 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10160 if (len != pkt_size)
10161 goto test_loopback_rx_exit;
10162
ca00392c 10163 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
f3c87cdd
YG
10164 skb = rx_buf->skb;
10165 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10166 for (i = ETH_HLEN; i < pkt_size; i++)
10167 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10168 goto test_loopback_rx_exit;
10169
10170 rc = 0;
10171
10172test_loopback_rx_exit:
f3c87cdd 10173
ca00392c
EG
10174 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10175 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10176 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10177 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
f3c87cdd
YG
10178
10179 /* Update producers */
ca00392c
EG
10180 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10181 fp_rx->rx_sge_prod);
f3c87cdd
YG
10182
10183test_loopback_exit:
10184 bp->link_params.loopback_mode = LOOPBACK_NONE;
10185
10186 return rc;
10187}
10188
10189static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10190{
b5bf9068 10191 int rc = 0, res;
f3c87cdd
YG
10192
10193 if (!netif_running(bp->dev))
10194 return BNX2X_LOOPBACK_FAILED;
10195
f8ef6e44 10196 bnx2x_netif_stop(bp, 1);
3910c8ae 10197 bnx2x_acquire_phy_lock(bp);
f3c87cdd 10198
b5bf9068
EG
10199 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10200 if (res) {
10201 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10202 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
10203 }
10204
b5bf9068
EG
10205 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10206 if (res) {
10207 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10208 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
10209 }
10210
3910c8ae 10211 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
10212 bnx2x_netif_start(bp);
10213
10214 return rc;
10215}
10216
10217#define CRC32_RESIDUAL 0xdebb20e3
10218
10219static int bnx2x_test_nvram(struct bnx2x *bp)
10220{
10221 static const struct {
10222 int offset;
10223 int size;
10224 } nvram_tbl[] = {
10225 { 0, 0x14 }, /* bootstrap */
10226 { 0x14, 0xec }, /* dir */
10227 { 0x100, 0x350 }, /* manuf_info */
10228 { 0x450, 0xf0 }, /* feature_info */
10229 { 0x640, 0x64 }, /* upgrade_key_info */
10230 { 0x6a4, 0x64 },
10231 { 0x708, 0x70 }, /* manuf_key_info */
10232 { 0x778, 0x70 },
10233 { 0, 0 }
10234 };
4781bfad 10235 __be32 buf[0x350 / 4];
f3c87cdd
YG
10236 u8 *data = (u8 *)buf;
10237 int i, rc;
ab6ad5a4 10238 u32 magic, crc;
f3c87cdd
YG
10239
10240 rc = bnx2x_nvram_read(bp, 0, data, 4);
10241 if (rc) {
f5372251 10242 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
10243 goto test_nvram_exit;
10244 }
10245
10246 magic = be32_to_cpu(buf[0]);
10247 if (magic != 0x669955aa) {
10248 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10249 rc = -ENODEV;
10250 goto test_nvram_exit;
10251 }
10252
10253 for (i = 0; nvram_tbl[i].size; i++) {
10254
10255 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10256 nvram_tbl[i].size);
10257 if (rc) {
10258 DP(NETIF_MSG_PROBE,
f5372251 10259 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
10260 goto test_nvram_exit;
10261 }
10262
ab6ad5a4
EG
10263 crc = ether_crc_le(nvram_tbl[i].size, data);
10264 if (crc != CRC32_RESIDUAL) {
f3c87cdd 10265 DP(NETIF_MSG_PROBE,
ab6ad5a4 10266 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
f3c87cdd
YG
10267 rc = -ENODEV;
10268 goto test_nvram_exit;
10269 }
10270 }
10271
10272test_nvram_exit:
10273 return rc;
10274}
10275
10276static int bnx2x_test_intr(struct bnx2x *bp)
10277{
10278 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10279 int i, rc;
10280
10281 if (!netif_running(bp->dev))
10282 return -ENODEV;
10283
8d9c5f34 10284 config->hdr.length = 0;
af246401
EG
10285 if (CHIP_IS_E1(bp))
10286 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10287 else
10288 config->hdr.offset = BP_FUNC(bp);
0626b899 10289 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
10290 config->hdr.reserved1 = 0;
10291
e665bfda
MC
10292 bp->set_mac_pending++;
10293 smp_wmb();
f3c87cdd
YG
10294 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10295 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10296 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10297 if (rc == 0) {
f3c87cdd
YG
10298 for (i = 0; i < 10; i++) {
10299 if (!bp->set_mac_pending)
10300 break;
e665bfda 10301 smp_rmb();
f3c87cdd
YG
10302 msleep_interruptible(10);
10303 }
10304 if (i == 10)
10305 rc = -ENODEV;
10306 }
10307
10308 return rc;
10309}
10310
a2fbb9ea
ET
10311static void bnx2x_self_test(struct net_device *dev,
10312 struct ethtool_test *etest, u64 *buf)
10313{
10314 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
10315
10316 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10317
f3c87cdd 10318 if (!netif_running(dev))
a2fbb9ea 10319 return;
a2fbb9ea 10320
33471629 10321 /* offline tests are not supported in MF mode */
f3c87cdd
YG
10322 if (IS_E1HMF(bp))
10323 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10324
10325 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
10326 int port = BP_PORT(bp);
10327 u32 val;
f3c87cdd
YG
10328 u8 link_up;
10329
279abdf5
EG
10330 /* save current value of input enable for TX port IF */
10331 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10332 /* disable input for TX port IF */
10333 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10334
f3c87cdd
YG
10335 link_up = bp->link_vars.link_up;
10336 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10337 bnx2x_nic_load(bp, LOAD_DIAG);
10338 /* wait until link state is restored */
10339 bnx2x_wait_for_link(bp, link_up);
10340
10341 if (bnx2x_test_registers(bp) != 0) {
10342 buf[0] = 1;
10343 etest->flags |= ETH_TEST_FL_FAILED;
10344 }
10345 if (bnx2x_test_memory(bp) != 0) {
10346 buf[1] = 1;
10347 etest->flags |= ETH_TEST_FL_FAILED;
10348 }
10349 buf[2] = bnx2x_test_loopback(bp, link_up);
10350 if (buf[2] != 0)
10351 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 10352
f3c87cdd 10353 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
10354
10355 /* restore input for TX port IF */
10356 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10357
f3c87cdd
YG
10358 bnx2x_nic_load(bp, LOAD_NORMAL);
10359 /* wait until link state is restored */
10360 bnx2x_wait_for_link(bp, link_up);
10361 }
10362 if (bnx2x_test_nvram(bp) != 0) {
10363 buf[3] = 1;
a2fbb9ea
ET
10364 etest->flags |= ETH_TEST_FL_FAILED;
10365 }
f3c87cdd
YG
10366 if (bnx2x_test_intr(bp) != 0) {
10367 buf[4] = 1;
10368 etest->flags |= ETH_TEST_FL_FAILED;
10369 }
10370 if (bp->port.pmf)
10371 if (bnx2x_link_test(bp) != 0) {
10372 buf[5] = 1;
10373 etest->flags |= ETH_TEST_FL_FAILED;
10374 }
f3c87cdd
YG
10375
10376#ifdef BNX2X_EXTRA_DEBUG
10377 bnx2x_panic_dump(bp);
10378#endif
a2fbb9ea
ET
10379}
10380
de832a55
EG
10381static const struct {
10382 long offset;
10383 int size;
10384 u8 string[ETH_GSTRING_LEN];
10385} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10386/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10387 { Q_STATS_OFFSET32(error_bytes_received_hi),
10388 8, "[%d]: rx_error_bytes" },
10389 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10390 8, "[%d]: rx_ucast_packets" },
10391 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10392 8, "[%d]: rx_mcast_packets" },
10393 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10394 8, "[%d]: rx_bcast_packets" },
10395 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10396 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10397 4, "[%d]: rx_phy_ip_err_discards"},
10398 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10399 4, "[%d]: rx_skb_alloc_discard" },
10400 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10401
10402/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10403 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10404 8, "[%d]: tx_packets" }
10405};
10406
bb2a0f7a
YG
10407static const struct {
10408 long offset;
10409 int size;
10410 u32 flags;
66e855f3
YG
10411#define STATS_FLAGS_PORT 1
10412#define STATS_FLAGS_FUNC 2
de832a55 10413#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 10414 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 10415} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
10416/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10417 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 10418 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 10419 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 10420 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 10421 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 10422 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 10423 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 10424 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 10425 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 10426 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 10427 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 10428 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 10429 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
10430 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10431 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10432 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10433 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10434/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10435 8, STATS_FLAGS_PORT, "rx_fragments" },
10436 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10437 8, STATS_FLAGS_PORT, "rx_jabbers" },
10438 { STATS_OFFSET32(no_buff_discard_hi),
10439 8, STATS_FLAGS_BOTH, "rx_discards" },
10440 { STATS_OFFSET32(mac_filter_discard),
10441 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10442 { STATS_OFFSET32(xxoverflow_discard),
10443 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10444 { STATS_OFFSET32(brb_drop_hi),
10445 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10446 { STATS_OFFSET32(brb_truncate_hi),
10447 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10448 { STATS_OFFSET32(pause_frames_received_hi),
10449 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10450 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10451 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10452 { STATS_OFFSET32(nig_timer_max),
10453 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10454/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10455 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10456 { STATS_OFFSET32(rx_skb_alloc_failed),
10457 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10458 { STATS_OFFSET32(hw_csum_err),
10459 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10460
10461 { STATS_OFFSET32(total_bytes_transmitted_hi),
10462 8, STATS_FLAGS_BOTH, "tx_bytes" },
10463 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10464 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10465 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10466 8, STATS_FLAGS_BOTH, "tx_packets" },
10467 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10468 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10469 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10470 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 10471 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 10472 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 10473 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 10474 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 10475/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 10476 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 10477 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 10478 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 10479 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 10480 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 10481 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 10482 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 10483 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 10484 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 10485 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 10486 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 10487 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 10488 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 10489 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 10490 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 10491 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 10492 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 10493 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 10494 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 10495/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 10496 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
10497 { STATS_OFFSET32(pause_frames_sent_hi),
10498 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
10499};
10500
de832a55
EG
10501#define IS_PORT_STAT(i) \
10502 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10503#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10504#define IS_E1HMF_MODE_STAT(bp) \
10505 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 10506
15f0a394
BH
10507static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10508{
10509 struct bnx2x *bp = netdev_priv(dev);
10510 int i, num_stats;
10511
10512 switch(stringset) {
10513 case ETH_SS_STATS:
10514 if (is_multi(bp)) {
10515 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10516 if (!IS_E1HMF_MODE_STAT(bp))
10517 num_stats += BNX2X_NUM_STATS;
10518 } else {
10519 if (IS_E1HMF_MODE_STAT(bp)) {
10520 num_stats = 0;
10521 for (i = 0; i < BNX2X_NUM_STATS; i++)
10522 if (IS_FUNC_STAT(i))
10523 num_stats++;
10524 } else
10525 num_stats = BNX2X_NUM_STATS;
10526 }
10527 return num_stats;
10528
10529 case ETH_SS_TEST:
10530 return BNX2X_NUM_TESTS;
10531
10532 default:
10533 return -EINVAL;
10534 }
10535}
10536
a2fbb9ea
ET
10537static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10538{
bb2a0f7a 10539 struct bnx2x *bp = netdev_priv(dev);
de832a55 10540 int i, j, k;
bb2a0f7a 10541
a2fbb9ea
ET
10542 switch (stringset) {
10543 case ETH_SS_STATS:
de832a55
EG
10544 if (is_multi(bp)) {
10545 k = 0;
ca00392c 10546 for_each_rx_queue(bp, i) {
de832a55
EG
10547 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10548 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10549 bnx2x_q_stats_arr[j].string, i);
10550 k += BNX2X_NUM_Q_STATS;
10551 }
10552 if (IS_E1HMF_MODE_STAT(bp))
10553 break;
10554 for (j = 0; j < BNX2X_NUM_STATS; j++)
10555 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10556 bnx2x_stats_arr[j].string);
10557 } else {
10558 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10559 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10560 continue;
10561 strcpy(buf + j*ETH_GSTRING_LEN,
10562 bnx2x_stats_arr[i].string);
10563 j++;
10564 }
bb2a0f7a 10565 }
a2fbb9ea
ET
10566 break;
10567
10568 case ETH_SS_TEST:
10569 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10570 break;
10571 }
10572}
10573
a2fbb9ea
ET
10574static void bnx2x_get_ethtool_stats(struct net_device *dev,
10575 struct ethtool_stats *stats, u64 *buf)
10576{
10577 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
10578 u32 *hw_stats, *offset;
10579 int i, j, k;
bb2a0f7a 10580
de832a55
EG
10581 if (is_multi(bp)) {
10582 k = 0;
ca00392c 10583 for_each_rx_queue(bp, i) {
de832a55
EG
10584 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10585 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10586 if (bnx2x_q_stats_arr[j].size == 0) {
10587 /* skip this counter */
10588 buf[k + j] = 0;
10589 continue;
10590 }
10591 offset = (hw_stats +
10592 bnx2x_q_stats_arr[j].offset);
10593 if (bnx2x_q_stats_arr[j].size == 4) {
10594 /* 4-byte counter */
10595 buf[k + j] = (u64) *offset;
10596 continue;
10597 }
10598 /* 8-byte counter */
10599 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10600 }
10601 k += BNX2X_NUM_Q_STATS;
10602 }
10603 if (IS_E1HMF_MODE_STAT(bp))
10604 return;
10605 hw_stats = (u32 *)&bp->eth_stats;
10606 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10607 if (bnx2x_stats_arr[j].size == 0) {
10608 /* skip this counter */
10609 buf[k + j] = 0;
10610 continue;
10611 }
10612 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10613 if (bnx2x_stats_arr[j].size == 4) {
10614 /* 4-byte counter */
10615 buf[k + j] = (u64) *offset;
10616 continue;
10617 }
10618 /* 8-byte counter */
10619 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10620 }
de832a55
EG
10621 } else {
10622 hw_stats = (u32 *)&bp->eth_stats;
10623 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10624 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10625 continue;
10626 if (bnx2x_stats_arr[i].size == 0) {
10627 /* skip this counter */
10628 buf[j] = 0;
10629 j++;
10630 continue;
10631 }
10632 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10633 if (bnx2x_stats_arr[i].size == 4) {
10634 /* 4-byte counter */
10635 buf[j] = (u64) *offset;
10636 j++;
10637 continue;
10638 }
10639 /* 8-byte counter */
10640 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10641 j++;
a2fbb9ea 10642 }
a2fbb9ea
ET
10643 }
10644}
10645
10646static int bnx2x_phys_id(struct net_device *dev, u32 data)
10647{
10648 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10649 int port = BP_PORT(bp);
a2fbb9ea
ET
10650 int i;
10651
34f80b04
EG
10652 if (!netif_running(dev))
10653 return 0;
10654
10655 if (!bp->port.pmf)
10656 return 0;
10657
a2fbb9ea
ET
10658 if (data == 0)
10659 data = 2;
10660
10661 for (i = 0; i < (data * 2); i++) {
c18487ee 10662 if ((i % 2) == 0)
34f80b04 10663 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
10664 bp->link_params.hw_led_mode,
10665 bp->link_params.chip_id);
10666 else
34f80b04 10667 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
10668 bp->link_params.hw_led_mode,
10669 bp->link_params.chip_id);
10670
a2fbb9ea
ET
10671 msleep_interruptible(500);
10672 if (signal_pending(current))
10673 break;
10674 }
10675
c18487ee 10676 if (bp->link_vars.link_up)
34f80b04 10677 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
10678 bp->link_vars.line_speed,
10679 bp->link_params.hw_led_mode,
10680 bp->link_params.chip_id);
a2fbb9ea
ET
10681
10682 return 0;
10683}
10684
0fc0b732 10685static const struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
10686 .get_settings = bnx2x_get_settings,
10687 .set_settings = bnx2x_set_settings,
10688 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
10689 .get_regs_len = bnx2x_get_regs_len,
10690 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
10691 .get_wol = bnx2x_get_wol,
10692 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
10693 .get_msglevel = bnx2x_get_msglevel,
10694 .set_msglevel = bnx2x_set_msglevel,
10695 .nway_reset = bnx2x_nway_reset,
01e53298 10696 .get_link = bnx2x_get_link,
7a9b2557
VZ
10697 .get_eeprom_len = bnx2x_get_eeprom_len,
10698 .get_eeprom = bnx2x_get_eeprom,
10699 .set_eeprom = bnx2x_set_eeprom,
10700 .get_coalesce = bnx2x_get_coalesce,
10701 .set_coalesce = bnx2x_set_coalesce,
10702 .get_ringparam = bnx2x_get_ringparam,
10703 .set_ringparam = bnx2x_set_ringparam,
10704 .get_pauseparam = bnx2x_get_pauseparam,
10705 .set_pauseparam = bnx2x_set_pauseparam,
10706 .get_rx_csum = bnx2x_get_rx_csum,
10707 .set_rx_csum = bnx2x_set_rx_csum,
10708 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10709 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
10710 .set_flags = bnx2x_set_flags,
10711 .get_flags = ethtool_op_get_flags,
10712 .get_sg = ethtool_op_get_sg,
10713 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
10714 .get_tso = ethtool_op_get_tso,
10715 .set_tso = bnx2x_set_tso,
7a9b2557 10716 .self_test = bnx2x_self_test,
15f0a394 10717 .get_sset_count = bnx2x_get_sset_count,
7a9b2557 10718 .get_strings = bnx2x_get_strings,
a2fbb9ea 10719 .phys_id = bnx2x_phys_id,
bb2a0f7a 10720 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
10721};
10722
10723/* end of ethtool_ops */
10724
10725/****************************************************************************
10726* General service functions
10727****************************************************************************/
10728
10729static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10730{
10731 u16 pmcsr;
10732
10733 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10734
10735 switch (state) {
10736 case PCI_D0:
34f80b04 10737 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
10738 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10739 PCI_PM_CTRL_PME_STATUS));
10740
10741 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10742 /* delay required during transition out of D3hot */
a2fbb9ea 10743 msleep(20);
34f80b04 10744 break;
a2fbb9ea 10745
34f80b04
EG
10746 case PCI_D3hot:
10747 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10748 pmcsr |= 3;
a2fbb9ea 10749
34f80b04
EG
10750 if (bp->wol)
10751 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10752
34f80b04
EG
10753 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10754 pmcsr);
a2fbb9ea 10755
34f80b04
EG
10756 /* No more memory access after this point until
10757 * device is brought back to D0.
10758 */
10759 break;
10760
10761 default:
10762 return -EINVAL;
10763 }
10764 return 0;
a2fbb9ea
ET
10765}
10766
237907c1
EG
10767static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10768{
10769 u16 rx_cons_sb;
10770
10771 /* Tell compiler that status block fields can change */
10772 barrier();
10773 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10774 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10775 rx_cons_sb++;
10776 return (fp->rx_comp_cons != rx_cons_sb);
10777}
10778
34f80b04
EG
10779/*
10780 * net_device service functions
10781 */
10782
a2fbb9ea
ET
10783static int bnx2x_poll(struct napi_struct *napi, int budget)
10784{
10785 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10786 napi);
10787 struct bnx2x *bp = fp->bp;
10788 int work_done = 0;
10789
10790#ifdef BNX2X_STOP_ON_ERROR
10791 if (unlikely(bp->panic))
34f80b04 10792 goto poll_panic;
a2fbb9ea
ET
10793#endif
10794
a2fbb9ea
ET
10795 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10796 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10797
10798 bnx2x_update_fpsb_idx(fp);
10799
8534f32c 10800 if (bnx2x_has_rx_work(fp)) {
a2fbb9ea 10801 work_done = bnx2x_rx_int(fp, budget);
356e2385 10802
8534f32c
EG
10803 /* must not complete if we consumed full budget */
10804 if (work_done >= budget)
10805 goto poll_again;
10806 }
a2fbb9ea 10807
ca00392c 10808 /* bnx2x_has_rx_work() reads the status block, thus we need to
8534f32c 10809 * ensure that status block indices have been actually read
ca00392c 10810 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
8534f32c 10811 * so that we won't write the "newer" value of the status block to IGU
ca00392c 10812 * (if there was a DMA right after bnx2x_has_rx_work and
8534f32c
EG
10813 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10814 * may be postponed to right before bnx2x_ack_sb). In this case
10815 * there will never be another interrupt until there is another update
10816 * of the status block, while there is still unhandled work.
10817 */
10818 rmb();
a2fbb9ea 10819
ca00392c 10820 if (!bnx2x_has_rx_work(fp)) {
a2fbb9ea 10821#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10822poll_panic:
a2fbb9ea 10823#endif
288379f0 10824 napi_complete(napi);
a2fbb9ea 10825
0626b899 10826 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 10827 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 10828 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
a2fbb9ea
ET
10829 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10830 }
356e2385 10831
8534f32c 10832poll_again:
a2fbb9ea
ET
10833 return work_done;
10834}
10835
755735eb
EG
10836
10837/* we split the first BD into headers and data BDs
33471629 10838 * to ease the pain of our fellow microcode engineers
755735eb
EG
10839 * we use one mapping for both BDs
10840 * So far this has only been observed to happen
10841 * in Other Operating Systems(TM)
10842 */
10843static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10844 struct bnx2x_fastpath *fp,
ca00392c
EG
10845 struct sw_tx_bd *tx_buf,
10846 struct eth_tx_start_bd **tx_bd, u16 hlen,
755735eb
EG
10847 u16 bd_prod, int nbd)
10848{
ca00392c 10849 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
755735eb
EG
10850 struct eth_tx_bd *d_tx_bd;
10851 dma_addr_t mapping;
10852 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10853
10854 /* first fix first BD */
10855 h_tx_bd->nbd = cpu_to_le16(nbd);
10856 h_tx_bd->nbytes = cpu_to_le16(hlen);
10857
10858 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10859 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10860 h_tx_bd->addr_lo, h_tx_bd->nbd);
10861
10862 /* now get a new data BD
10863 * (after the pbd) and fill it */
10864 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 10865 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
755735eb
EG
10866
10867 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10868 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10869
10870 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10871 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10872 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
ca00392c
EG
10873
10874 /* this marks the BD as one that has no individual mapping */
10875 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10876
755735eb
EG
10877 DP(NETIF_MSG_TX_QUEUED,
10878 "TSO split data size is %d (%x:%x)\n",
10879 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10880
ca00392c
EG
10881 /* update tx_bd */
10882 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
755735eb
EG
10883
10884 return bd_prod;
10885}
10886
10887static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10888{
10889 if (fix > 0)
10890 csum = (u16) ~csum_fold(csum_sub(csum,
10891 csum_partial(t_header - fix, fix, 0)));
10892
10893 else if (fix < 0)
10894 csum = (u16) ~csum_fold(csum_add(csum,
10895 csum_partial(t_header, -fix, 0)));
10896
10897 return swab16(csum);
10898}
10899
10900static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10901{
10902 u32 rc;
10903
10904 if (skb->ip_summed != CHECKSUM_PARTIAL)
10905 rc = XMIT_PLAIN;
10906
10907 else {
4781bfad 10908 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
10909 rc = XMIT_CSUM_V6;
10910 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10911 rc |= XMIT_CSUM_TCP;
10912
10913 } else {
10914 rc = XMIT_CSUM_V4;
10915 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10916 rc |= XMIT_CSUM_TCP;
10917 }
10918 }
10919
10920 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10921 rc |= XMIT_GSO_V4;
10922
10923 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10924 rc |= XMIT_GSO_V6;
10925
10926 return rc;
10927}
10928
632da4d6 10929#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10930/* check if packet requires linearization (packet is too fragmented)
10931 no need to check fragmentation if page size > 8K (there will be no
10932 violation to FW restrictions) */
755735eb
EG
10933static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10934 u32 xmit_type)
10935{
10936 int to_copy = 0;
10937 int hlen = 0;
10938 int first_bd_sz = 0;
10939
10940 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10941 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10942
10943 if (xmit_type & XMIT_GSO) {
10944 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10945 /* Check if LSO packet needs to be copied:
10946 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10947 int wnd_size = MAX_FETCH_BD - 3;
33471629 10948 /* Number of windows to check */
755735eb
EG
10949 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10950 int wnd_idx = 0;
10951 int frag_idx = 0;
10952 u32 wnd_sum = 0;
10953
10954 /* Headers length */
10955 hlen = (int)(skb_transport_header(skb) - skb->data) +
10956 tcp_hdrlen(skb);
10957
10958 /* Amount of data (w/o headers) on linear part of SKB*/
10959 first_bd_sz = skb_headlen(skb) - hlen;
10960
10961 wnd_sum = first_bd_sz;
10962
10963 /* Calculate the first sum - it's special */
10964 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10965 wnd_sum +=
10966 skb_shinfo(skb)->frags[frag_idx].size;
10967
10968 /* If there was data on linear skb data - check it */
10969 if (first_bd_sz > 0) {
10970 if (unlikely(wnd_sum < lso_mss)) {
10971 to_copy = 1;
10972 goto exit_lbl;
10973 }
10974
10975 wnd_sum -= first_bd_sz;
10976 }
10977
10978 /* Others are easier: run through the frag list and
10979 check all windows */
10980 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10981 wnd_sum +=
10982 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10983
10984 if (unlikely(wnd_sum < lso_mss)) {
10985 to_copy = 1;
10986 break;
10987 }
10988 wnd_sum -=
10989 skb_shinfo(skb)->frags[wnd_idx].size;
10990 }
755735eb
EG
10991 } else {
10992 /* in non-LSO too fragmented packet should always
10993 be linearized */
10994 to_copy = 1;
10995 }
10996 }
10997
10998exit_lbl:
10999 if (unlikely(to_copy))
11000 DP(NETIF_MSG_TX_QUEUED,
11001 "Linearization IS REQUIRED for %s packet. "
11002 "num_frags %d hlen %d first_bd_sz %d\n",
11003 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11004 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11005
11006 return to_copy;
11007}
632da4d6 11008#endif
755735eb
EG
11009
11010/* called with netif_tx_lock
a2fbb9ea 11011 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 11012 * netif_wake_queue()
a2fbb9ea 11013 */
61357325 11014static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
a2fbb9ea
ET
11015{
11016 struct bnx2x *bp = netdev_priv(dev);
ca00392c 11017 struct bnx2x_fastpath *fp, *fp_stat;
555f6c78 11018 struct netdev_queue *txq;
a2fbb9ea 11019 struct sw_tx_bd *tx_buf;
ca00392c
EG
11020 struct eth_tx_start_bd *tx_start_bd;
11021 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
a2fbb9ea
ET
11022 struct eth_tx_parse_bd *pbd = NULL;
11023 u16 pkt_prod, bd_prod;
755735eb 11024 int nbd, fp_index;
a2fbb9ea 11025 dma_addr_t mapping;
755735eb 11026 u32 xmit_type = bnx2x_xmit_type(bp, skb);
755735eb
EG
11027 int i;
11028 u8 hlen = 0;
ca00392c 11029 __le16 pkt_size = 0;
a2fbb9ea
ET
11030
11031#ifdef BNX2X_STOP_ON_ERROR
11032 if (unlikely(bp->panic))
11033 return NETDEV_TX_BUSY;
11034#endif
11035
555f6c78
EG
11036 fp_index = skb_get_queue_mapping(skb);
11037 txq = netdev_get_tx_queue(dev, fp_index);
11038
ca00392c
EG
11039 fp = &bp->fp[fp_index + bp->num_rx_queues];
11040 fp_stat = &bp->fp[fp_index];
755735eb 11041
231fd58a 11042 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
ca00392c 11043 fp_stat->eth_q_stats.driver_xoff++;
555f6c78 11044 netif_tx_stop_queue(txq);
a2fbb9ea
ET
11045 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11046 return NETDEV_TX_BUSY;
11047 }
11048
755735eb
EG
11049 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11050 " gso type %x xmit_type %x\n",
11051 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11052 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11053
632da4d6 11054#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
11055 /* First, check if we need to linearize the skb (due to FW
11056 restrictions). No need to check fragmentation if page size > 8K
11057 (there will be no violation to FW restrictions) */
755735eb
EG
11058 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11059 /* Statistics of linearization */
11060 bp->lin_cnt++;
11061 if (skb_linearize(skb) != 0) {
11062 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11063 "silently dropping this SKB\n");
11064 dev_kfree_skb_any(skb);
da5a662a 11065 return NETDEV_TX_OK;
755735eb
EG
11066 }
11067 }
632da4d6 11068#endif
755735eb 11069
a2fbb9ea 11070 /*
755735eb 11071 Please read carefully. First we use one BD which we mark as start,
ca00392c 11072 then we have a parsing info BD (used for TSO or xsum),
755735eb 11073 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
11074 (don't forget to mark the last one as last,
11075 and to unmap only AFTER you write to the BD ...)
755735eb 11076 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
11077 */
11078
11079 pkt_prod = fp->tx_pkt_prod++;
755735eb 11080 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 11081
755735eb 11082 /* get a tx_buf and first BD */
a2fbb9ea 11083 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 11084 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 11085
ca00392c
EG
11086 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11087 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11088 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 11089 /* header nbd */
ca00392c 11090 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 11091
755735eb
EG
11092 /* remember the first BD of the packet */
11093 tx_buf->first_bd = fp->tx_bd_prod;
11094 tx_buf->skb = skb;
ca00392c 11095 tx_buf->flags = 0;
a2fbb9ea
ET
11096
11097 DP(NETIF_MSG_TX_QUEUED,
11098 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 11099 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 11100
0c6671b0
EG
11101#ifdef BCM_VLAN
11102 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11103 (bp->flags & HW_VLAN_TX_FLAG)) {
ca00392c
EG
11104 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11105 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 11106 } else
0c6671b0 11107#endif
ca00392c 11108 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 11109
ca00392c
EG
11110 /* turn on parsing and get a BD */
11111 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11112 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 11113
ca00392c 11114 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
755735eb
EG
11115
11116 if (xmit_type & XMIT_CSUM) {
ca00392c 11117 hlen = (skb_network_header(skb) - skb->data) / 2;
a2fbb9ea
ET
11118
11119 /* for now NS flag is not used in Linux */
4781bfad
EG
11120 pbd->global_data =
11121 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11122 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 11123
755735eb
EG
11124 pbd->ip_hlen = (skb_transport_header(skb) -
11125 skb_network_header(skb)) / 2;
11126
11127 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 11128
755735eb 11129 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 11130 hlen = hlen*2;
a2fbb9ea 11131
ca00392c 11132 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
755735eb
EG
11133
11134 if (xmit_type & XMIT_CSUM_V4)
ca00392c 11135 tx_start_bd->bd_flags.as_bitfield |=
755735eb
EG
11136 ETH_TX_BD_FLAGS_IP_CSUM;
11137 else
ca00392c
EG
11138 tx_start_bd->bd_flags.as_bitfield |=
11139 ETH_TX_BD_FLAGS_IPV6;
755735eb
EG
11140
11141 if (xmit_type & XMIT_CSUM_TCP) {
11142 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11143
11144 } else {
11145 s8 fix = SKB_CS_OFF(skb); /* signed! */
11146
ca00392c 11147 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 11148
755735eb 11149 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11150 "hlen %d fix %d csum before fix %x\n",
11151 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
755735eb
EG
11152
11153 /* HW bug: fixup the CSUM */
11154 pbd->tcp_pseudo_csum =
11155 bnx2x_csum_fix(skb_transport_header(skb),
11156 SKB_CS(skb), fix);
11157
11158 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11159 pbd->tcp_pseudo_csum);
11160 }
a2fbb9ea
ET
11161 }
11162
11163 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 11164 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea 11165
ca00392c
EG
11166 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11167 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11168 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11169 tx_start_bd->nbd = cpu_to_le16(nbd);
11170 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11171 pkt_size = tx_start_bd->nbytes;
a2fbb9ea
ET
11172
11173 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 11174 " nbytes %d flags %x vlan %x\n",
ca00392c
EG
11175 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11176 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11177 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 11178
755735eb 11179 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
11180
11181 DP(NETIF_MSG_TX_QUEUED,
11182 "TSO packet len %d hlen %d total len %d tso size %d\n",
11183 skb->len, hlen, skb_headlen(skb),
11184 skb_shinfo(skb)->gso_size);
11185
ca00392c 11186 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 11187
755735eb 11188 if (unlikely(skb_headlen(skb) > hlen))
ca00392c
EG
11189 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11190 hlen, bd_prod, ++nbd);
a2fbb9ea
ET
11191
11192 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11193 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
11194 pbd->tcp_flags = pbd_tcp_flags(skb);
11195
11196 if (xmit_type & XMIT_GSO_V4) {
11197 pbd->ip_id = swab16(ip_hdr(skb)->id);
11198 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
11199 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11200 ip_hdr(skb)->daddr,
11201 0, IPPROTO_TCP, 0));
755735eb
EG
11202
11203 } else
11204 pbd->tcp_pseudo_csum =
11205 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11206 &ipv6_hdr(skb)->daddr,
11207 0, IPPROTO_TCP, 0));
11208
a2fbb9ea
ET
11209 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11210 }
ca00392c 11211 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 11212
755735eb
EG
11213 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11214 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 11215
755735eb 11216 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c
EG
11217 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11218 if (total_pkt_bd == NULL)
11219 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 11220
755735eb
EG
11221 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11222 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 11223
ca00392c
EG
11224 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11225 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11226 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11227 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 11228
755735eb 11229 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11230 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11231 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11232 le16_to_cpu(tx_data_bd->nbytes));
a2fbb9ea
ET
11233 }
11234
ca00392c 11235 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 11236
a2fbb9ea
ET
11237 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11238
755735eb 11239 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
11240 * if the packet contains or ends with it
11241 */
11242 if (TX_BD_POFF(bd_prod) < nbd)
11243 nbd++;
11244
ca00392c
EG
11245 if (total_pkt_bd != NULL)
11246 total_pkt_bd->total_pkt_bytes = pkt_size;
11247
a2fbb9ea
ET
11248 if (pbd)
11249 DP(NETIF_MSG_TX_QUEUED,
11250 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11251 " tcp_flags %x xsum %x seq %u hlen %u\n",
11252 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11253 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 11254 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 11255
755735eb 11256 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 11257
58f4c4cf
EG
11258 /*
11259 * Make sure that the BD data is updated before updating the producer
11260 * since FW might read the BD right after the producer is updated.
11261 * This is only applicable for weak-ordered memory model archs such
11262 * as IA-64. The following barrier is also mandatory since FW will
11263 * assumes packets must have BDs.
11264 */
11265 wmb();
11266
ca00392c
EG
11267 fp->tx_db.data.prod += nbd;
11268 barrier();
11269 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
a2fbb9ea
ET
11270
11271 mmiowb();
11272
755735eb 11273 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
11274
11275 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 11276 netif_tx_stop_queue(txq);
58f4c4cf
EG
11277 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11278 if we put Tx into XOFF state. */
11279 smp_mb();
ca00392c 11280 fp_stat->eth_q_stats.driver_xoff++;
a2fbb9ea 11281 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 11282 netif_tx_wake_queue(txq);
a2fbb9ea 11283 }
ca00392c 11284 fp_stat->tx_pkt++;
a2fbb9ea
ET
11285
11286 return NETDEV_TX_OK;
11287}
11288
bb2a0f7a 11289/* called with rtnl_lock */
a2fbb9ea
ET
11290static int bnx2x_open(struct net_device *dev)
11291{
11292 struct bnx2x *bp = netdev_priv(dev);
11293
6eccabb3
EG
11294 netif_carrier_off(dev);
11295
a2fbb9ea
ET
11296 bnx2x_set_power_state(bp, PCI_D0);
11297
bb2a0f7a 11298 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
11299}
11300
bb2a0f7a 11301/* called with rtnl_lock */
a2fbb9ea
ET
11302static int bnx2x_close(struct net_device *dev)
11303{
a2fbb9ea
ET
11304 struct bnx2x *bp = netdev_priv(dev);
11305
11306 /* Unload the driver, release IRQs */
bb2a0f7a
YG
11307 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11308 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11309 if (!CHIP_REV_IS_SLOW(bp))
11310 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
11311
11312 return 0;
11313}
11314
f5372251 11315/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
11316static void bnx2x_set_rx_mode(struct net_device *dev)
11317{
11318 struct bnx2x *bp = netdev_priv(dev);
11319 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11320 int port = BP_PORT(bp);
11321
11322 if (bp->state != BNX2X_STATE_OPEN) {
11323 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11324 return;
11325 }
11326
11327 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11328
11329 if (dev->flags & IFF_PROMISC)
11330 rx_mode = BNX2X_RX_MODE_PROMISC;
11331
11332 else if ((dev->flags & IFF_ALLMULTI) ||
11333 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11334 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11335
11336 else { /* some multicasts */
11337 if (CHIP_IS_E1(bp)) {
11338 int i, old, offset;
11339 struct dev_mc_list *mclist;
11340 struct mac_configuration_cmd *config =
11341 bnx2x_sp(bp, mcast_config);
11342
11343 for (i = 0, mclist = dev->mc_list;
11344 mclist && (i < dev->mc_count);
11345 i++, mclist = mclist->next) {
11346
11347 config->config_table[i].
11348 cam_entry.msb_mac_addr =
11349 swab16(*(u16 *)&mclist->dmi_addr[0]);
11350 config->config_table[i].
11351 cam_entry.middle_mac_addr =
11352 swab16(*(u16 *)&mclist->dmi_addr[2]);
11353 config->config_table[i].
11354 cam_entry.lsb_mac_addr =
11355 swab16(*(u16 *)&mclist->dmi_addr[4]);
11356 config->config_table[i].cam_entry.flags =
11357 cpu_to_le16(port);
11358 config->config_table[i].
11359 target_table_entry.flags = 0;
ca00392c
EG
11360 config->config_table[i].target_table_entry.
11361 clients_bit_vector =
11362 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
11363 config->config_table[i].
11364 target_table_entry.vlan_id = 0;
11365
11366 DP(NETIF_MSG_IFUP,
11367 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11368 config->config_table[i].
11369 cam_entry.msb_mac_addr,
11370 config->config_table[i].
11371 cam_entry.middle_mac_addr,
11372 config->config_table[i].
11373 cam_entry.lsb_mac_addr);
11374 }
8d9c5f34 11375 old = config->hdr.length;
34f80b04
EG
11376 if (old > i) {
11377 for (; i < old; i++) {
11378 if (CAM_IS_INVALID(config->
11379 config_table[i])) {
af246401 11380 /* already invalidated */
34f80b04
EG
11381 break;
11382 }
11383 /* invalidate */
11384 CAM_INVALIDATE(config->
11385 config_table[i]);
11386 }
11387 }
11388
11389 if (CHIP_REV_IS_SLOW(bp))
11390 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11391 else
11392 offset = BNX2X_MAX_MULTICAST*(1 + port);
11393
8d9c5f34 11394 config->hdr.length = i;
34f80b04 11395 config->hdr.offset = offset;
8d9c5f34 11396 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
11397 config->hdr.reserved1 = 0;
11398
e665bfda
MC
11399 bp->set_mac_pending++;
11400 smp_wmb();
11401
34f80b04
EG
11402 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11403 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11404 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11405 0);
11406 } else { /* E1H */
11407 /* Accept one or more multicasts */
11408 struct dev_mc_list *mclist;
11409 u32 mc_filter[MC_HASH_SIZE];
11410 u32 crc, bit, regidx;
11411 int i;
11412
11413 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11414
11415 for (i = 0, mclist = dev->mc_list;
11416 mclist && (i < dev->mc_count);
11417 i++, mclist = mclist->next) {
11418
7c510e4b
JB
11419 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11420 mclist->dmi_addr);
34f80b04
EG
11421
11422 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11423 bit = (crc >> 24) & 0xff;
11424 regidx = bit >> 5;
11425 bit &= 0x1f;
11426 mc_filter[regidx] |= (1 << bit);
11427 }
11428
11429 for (i = 0; i < MC_HASH_SIZE; i++)
11430 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11431 mc_filter[i]);
11432 }
11433 }
11434
11435 bp->rx_mode = rx_mode;
11436 bnx2x_set_storm_rx_mode(bp);
11437}
11438
11439/* called with rtnl_lock */
a2fbb9ea
ET
11440static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11441{
11442 struct sockaddr *addr = p;
11443 struct bnx2x *bp = netdev_priv(dev);
11444
34f80b04 11445 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
11446 return -EINVAL;
11447
11448 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
11449 if (netif_running(dev)) {
11450 if (CHIP_IS_E1(bp))
e665bfda 11451 bnx2x_set_eth_mac_addr_e1(bp, 1);
34f80b04 11452 else
e665bfda 11453 bnx2x_set_eth_mac_addr_e1h(bp, 1);
34f80b04 11454 }
a2fbb9ea
ET
11455
11456 return 0;
11457}
11458
c18487ee 11459/* called with rtnl_lock */
01cd4528
EG
11460static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11461 int devad, u16 addr)
a2fbb9ea 11462{
01cd4528
EG
11463 struct bnx2x *bp = netdev_priv(netdev);
11464 u16 value;
11465 int rc;
11466 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 11467
01cd4528
EG
11468 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11469 prtad, devad, addr);
a2fbb9ea 11470
01cd4528
EG
11471 if (prtad != bp->mdio.prtad) {
11472 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11473 prtad, bp->mdio.prtad);
11474 return -EINVAL;
11475 }
11476
11477 /* The HW expects different devad if CL22 is used */
11478 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 11479
01cd4528
EG
11480 bnx2x_acquire_phy_lock(bp);
11481 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11482 devad, addr, &value);
11483 bnx2x_release_phy_lock(bp);
11484 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 11485
01cd4528
EG
11486 if (!rc)
11487 rc = value;
11488 return rc;
11489}
a2fbb9ea 11490
01cd4528
EG
11491/* called with rtnl_lock */
11492static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11493 u16 addr, u16 value)
11494{
11495 struct bnx2x *bp = netdev_priv(netdev);
11496 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11497 int rc;
11498
11499 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11500 " value 0x%x\n", prtad, devad, addr, value);
11501
11502 if (prtad != bp->mdio.prtad) {
11503 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11504 prtad, bp->mdio.prtad);
11505 return -EINVAL;
a2fbb9ea
ET
11506 }
11507
01cd4528
EG
11508 /* The HW expects different devad if CL22 is used */
11509 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 11510
01cd4528
EG
11511 bnx2x_acquire_phy_lock(bp);
11512 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11513 devad, addr, value);
11514 bnx2x_release_phy_lock(bp);
11515 return rc;
11516}
c18487ee 11517
01cd4528
EG
11518/* called with rtnl_lock */
11519static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11520{
11521 struct bnx2x *bp = netdev_priv(dev);
11522 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 11523
01cd4528
EG
11524 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11525 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 11526
01cd4528
EG
11527 if (!netif_running(dev))
11528 return -EAGAIN;
11529
11530 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
11531}
11532
34f80b04 11533/* called with rtnl_lock */
a2fbb9ea
ET
11534static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11535{
11536 struct bnx2x *bp = netdev_priv(dev);
34f80b04 11537 int rc = 0;
a2fbb9ea
ET
11538
11539 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11540 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11541 return -EINVAL;
11542
11543 /* This does not race with packet allocation
c14423fe 11544 * because the actual alloc size is
a2fbb9ea
ET
11545 * only updated as part of load
11546 */
11547 dev->mtu = new_mtu;
11548
11549 if (netif_running(dev)) {
34f80b04
EG
11550 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11551 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 11552 }
34f80b04
EG
11553
11554 return rc;
a2fbb9ea
ET
11555}
11556
11557static void bnx2x_tx_timeout(struct net_device *dev)
11558{
11559 struct bnx2x *bp = netdev_priv(dev);
11560
11561#ifdef BNX2X_STOP_ON_ERROR
11562 if (!bp->panic)
11563 bnx2x_panic();
11564#endif
11565 /* This allows the netif to be shutdown gracefully before resetting */
11566 schedule_work(&bp->reset_task);
11567}
11568
11569#ifdef BCM_VLAN
34f80b04 11570/* called with rtnl_lock */
a2fbb9ea
ET
11571static void bnx2x_vlan_rx_register(struct net_device *dev,
11572 struct vlan_group *vlgrp)
11573{
11574 struct bnx2x *bp = netdev_priv(dev);
11575
11576 bp->vlgrp = vlgrp;
0c6671b0
EG
11577
11578 /* Set flags according to the required capabilities */
11579 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11580
11581 if (dev->features & NETIF_F_HW_VLAN_TX)
11582 bp->flags |= HW_VLAN_TX_FLAG;
11583
11584 if (dev->features & NETIF_F_HW_VLAN_RX)
11585 bp->flags |= HW_VLAN_RX_FLAG;
11586
a2fbb9ea 11587 if (netif_running(dev))
49d66772 11588 bnx2x_set_client_config(bp);
a2fbb9ea 11589}
34f80b04 11590
a2fbb9ea
ET
11591#endif
11592
11593#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11594static void poll_bnx2x(struct net_device *dev)
11595{
11596 struct bnx2x *bp = netdev_priv(dev);
11597
11598 disable_irq(bp->pdev->irq);
11599 bnx2x_interrupt(bp->pdev->irq, dev);
11600 enable_irq(bp->pdev->irq);
11601}
11602#endif
11603
c64213cd
SH
11604static const struct net_device_ops bnx2x_netdev_ops = {
11605 .ndo_open = bnx2x_open,
11606 .ndo_stop = bnx2x_close,
11607 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 11608 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
11609 .ndo_set_mac_address = bnx2x_change_mac_addr,
11610 .ndo_validate_addr = eth_validate_addr,
11611 .ndo_do_ioctl = bnx2x_ioctl,
11612 .ndo_change_mtu = bnx2x_change_mtu,
11613 .ndo_tx_timeout = bnx2x_tx_timeout,
11614#ifdef BCM_VLAN
11615 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11616#endif
11617#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11618 .ndo_poll_controller = poll_bnx2x,
11619#endif
11620};
11621
34f80b04
EG
11622static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11623 struct net_device *dev)
a2fbb9ea
ET
11624{
11625 struct bnx2x *bp;
11626 int rc;
11627
11628 SET_NETDEV_DEV(dev, &pdev->dev);
11629 bp = netdev_priv(dev);
11630
34f80b04
EG
11631 bp->dev = dev;
11632 bp->pdev = pdev;
a2fbb9ea 11633 bp->flags = 0;
34f80b04 11634 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
11635
11636 rc = pci_enable_device(pdev);
11637 if (rc) {
11638 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11639 goto err_out;
11640 }
11641
11642 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11643 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11644 " aborting\n");
11645 rc = -ENODEV;
11646 goto err_out_disable;
11647 }
11648
11649 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11650 printk(KERN_ERR PFX "Cannot find second PCI device"
11651 " base address, aborting\n");
11652 rc = -ENODEV;
11653 goto err_out_disable;
11654 }
11655
34f80b04
EG
11656 if (atomic_read(&pdev->enable_cnt) == 1) {
11657 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11658 if (rc) {
11659 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11660 " aborting\n");
11661 goto err_out_disable;
11662 }
a2fbb9ea 11663
34f80b04
EG
11664 pci_set_master(pdev);
11665 pci_save_state(pdev);
11666 }
a2fbb9ea
ET
11667
11668 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11669 if (bp->pm_cap == 0) {
11670 printk(KERN_ERR PFX "Cannot find power management"
11671 " capability, aborting\n");
11672 rc = -EIO;
11673 goto err_out_release;
11674 }
11675
11676 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11677 if (bp->pcie_cap == 0) {
11678 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11679 " aborting\n");
11680 rc = -EIO;
11681 goto err_out_release;
11682 }
11683
6a35528a 11684 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 11685 bp->flags |= USING_DAC_FLAG;
6a35528a 11686 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
a2fbb9ea
ET
11687 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11688 " failed, aborting\n");
11689 rc = -EIO;
11690 goto err_out_release;
11691 }
11692
284901a9 11693 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
a2fbb9ea
ET
11694 printk(KERN_ERR PFX "System does not support DMA,"
11695 " aborting\n");
11696 rc = -EIO;
11697 goto err_out_release;
11698 }
11699
34f80b04
EG
11700 dev->mem_start = pci_resource_start(pdev, 0);
11701 dev->base_addr = dev->mem_start;
11702 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
11703
11704 dev->irq = pdev->irq;
11705
275f165f 11706 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
11707 if (!bp->regview) {
11708 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11709 rc = -ENOMEM;
11710 goto err_out_release;
11711 }
11712
34f80b04
EG
11713 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11714 min_t(u64, BNX2X_DB_SIZE,
11715 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
11716 if (!bp->doorbells) {
11717 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11718 rc = -ENOMEM;
11719 goto err_out_unmap;
11720 }
11721
11722 bnx2x_set_power_state(bp, PCI_D0);
11723
34f80b04
EG
11724 /* clean indirect addresses */
11725 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11726 PCICFG_VENDOR_ID_OFFSET);
11727 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11728 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11729 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11730 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 11731
34f80b04 11732 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 11733
c64213cd 11734 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 11735 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
11736 dev->features |= NETIF_F_SG;
11737 dev->features |= NETIF_F_HW_CSUM;
11738 if (bp->flags & USING_DAC_FLAG)
11739 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
11740 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11741 dev->features |= NETIF_F_TSO6;
34f80b04
EG
11742#ifdef BCM_VLAN
11743 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 11744 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
11745
11746 dev->vlan_features |= NETIF_F_SG;
11747 dev->vlan_features |= NETIF_F_HW_CSUM;
11748 if (bp->flags & USING_DAC_FLAG)
11749 dev->vlan_features |= NETIF_F_HIGHDMA;
11750 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11751 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 11752#endif
a2fbb9ea 11753
01cd4528
EG
11754 /* get_port_hwinfo() will set prtad and mmds properly */
11755 bp->mdio.prtad = MDIO_PRTAD_NONE;
11756 bp->mdio.mmds = 0;
11757 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11758 bp->mdio.dev = dev;
11759 bp->mdio.mdio_read = bnx2x_mdio_read;
11760 bp->mdio.mdio_write = bnx2x_mdio_write;
11761
a2fbb9ea
ET
11762 return 0;
11763
11764err_out_unmap:
11765 if (bp->regview) {
11766 iounmap(bp->regview);
11767 bp->regview = NULL;
11768 }
a2fbb9ea
ET
11769 if (bp->doorbells) {
11770 iounmap(bp->doorbells);
11771 bp->doorbells = NULL;
11772 }
11773
11774err_out_release:
34f80b04
EG
11775 if (atomic_read(&pdev->enable_cnt) == 1)
11776 pci_release_regions(pdev);
a2fbb9ea
ET
11777
11778err_out_disable:
11779 pci_disable_device(pdev);
11780 pci_set_drvdata(pdev, NULL);
11781
11782err_out:
11783 return rc;
11784}
11785
37f9ce62
EG
11786static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11787 int *width, int *speed)
25047950
ET
11788{
11789 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11790
37f9ce62 11791 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 11792
37f9ce62
EG
11793 /* return value of 1=2.5GHz 2=5GHz */
11794 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 11795}
37f9ce62 11796
94a78b79
VZ
11797static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11798{
37f9ce62 11799 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
11800 struct bnx2x_fw_file_hdr *fw_hdr;
11801 struct bnx2x_fw_file_section *sections;
94a78b79 11802 u32 offset, len, num_ops;
37f9ce62 11803 u16 *ops_offsets;
94a78b79 11804 int i;
37f9ce62 11805 const u8 *fw_ver;
94a78b79
VZ
11806
11807 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11808 return -EINVAL;
11809
11810 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11811 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11812
11813 /* Make sure none of the offsets and sizes make us read beyond
11814 * the end of the firmware data */
11815 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11816 offset = be32_to_cpu(sections[i].offset);
11817 len = be32_to_cpu(sections[i].len);
11818 if (offset + len > firmware->size) {
37f9ce62
EG
11819 printk(KERN_ERR PFX "Section %d length is out of "
11820 "bounds\n", i);
94a78b79
VZ
11821 return -EINVAL;
11822 }
11823 }
11824
11825 /* Likewise for the init_ops offsets */
11826 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11827 ops_offsets = (u16 *)(firmware->data + offset);
11828 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11829
11830 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11831 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
37f9ce62
EG
11832 printk(KERN_ERR PFX "Section offset %d is out of "
11833 "bounds\n", i);
94a78b79
VZ
11834 return -EINVAL;
11835 }
11836 }
11837
11838 /* Check FW version */
11839 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11840 fw_ver = firmware->data + offset;
11841 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11842 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11843 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11844 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11845 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11846 " Should be %d.%d.%d.%d\n",
11847 fw_ver[0], fw_ver[1], fw_ver[2],
11848 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11849 BCM_5710_FW_MINOR_VERSION,
11850 BCM_5710_FW_REVISION_VERSION,
11851 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 11852 return -EINVAL;
94a78b79
VZ
11853 }
11854
11855 return 0;
11856}
11857
ab6ad5a4 11858static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 11859{
ab6ad5a4
EG
11860 const __be32 *source = (const __be32 *)_source;
11861 u32 *target = (u32 *)_target;
94a78b79 11862 u32 i;
94a78b79
VZ
11863
11864 for (i = 0; i < n/4; i++)
11865 target[i] = be32_to_cpu(source[i]);
11866}
11867
11868/*
11869 Ops array is stored in the following format:
11870 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11871 */
ab6ad5a4 11872static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 11873{
ab6ad5a4
EG
11874 const __be32 *source = (const __be32 *)_source;
11875 struct raw_op *target = (struct raw_op *)_target;
94a78b79 11876 u32 i, j, tmp;
94a78b79 11877
ab6ad5a4 11878 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
11879 tmp = be32_to_cpu(source[j]);
11880 target[i].op = (tmp >> 24) & 0xff;
11881 target[i].offset = tmp & 0xffffff;
11882 target[i].raw_data = be32_to_cpu(source[j+1]);
11883 }
11884}
ab6ad5a4
EG
11885
11886static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 11887{
ab6ad5a4
EG
11888 const __be16 *source = (const __be16 *)_source;
11889 u16 *target = (u16 *)_target;
94a78b79 11890 u32 i;
94a78b79
VZ
11891
11892 for (i = 0; i < n/2; i++)
11893 target[i] = be16_to_cpu(source[i]);
11894}
11895
11896#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
ab6ad5a4
EG
11897 do { \
11898 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11899 bp->arr = kmalloc(len, GFP_KERNEL); \
94a78b79 11900 if (!bp->arr) { \
ab6ad5a4
EG
11901 printk(KERN_ERR PFX "Failed to allocate %d bytes " \
11902 "for "#arr"\n", len); \
94a78b79
VZ
11903 goto lbl; \
11904 } \
ab6ad5a4
EG
11905 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
11906 (u8 *)bp->arr, len); \
94a78b79
VZ
11907 } while (0)
11908
94a78b79
VZ
11909static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11910{
11911 char fw_file_name[40] = {0};
94a78b79 11912 struct bnx2x_fw_file_hdr *fw_hdr;
ab6ad5a4 11913 int rc, offset;
94a78b79
VZ
11914
11915 /* Create a FW file name */
11916 if (CHIP_IS_E1(bp))
ab6ad5a4 11917 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
94a78b79
VZ
11918 else
11919 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11920
11921 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11922 BCM_5710_FW_MAJOR_VERSION,
ab6ad5a4
EG
11923 BCM_5710_FW_MINOR_VERSION,
11924 BCM_5710_FW_REVISION_VERSION,
11925 BCM_5710_FW_ENGINEERING_VERSION);
94a78b79
VZ
11926
11927 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11928
11929 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11930 if (rc) {
ab6ad5a4
EG
11931 printk(KERN_ERR PFX "Can't load firmware file %s\n",
11932 fw_file_name);
94a78b79
VZ
11933 goto request_firmware_exit;
11934 }
11935
11936 rc = bnx2x_check_firmware(bp);
11937 if (rc) {
11938 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11939 goto request_firmware_exit;
11940 }
11941
11942 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11943
11944 /* Initialize the pointers to the init arrays */
11945 /* Blob */
11946 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11947
11948 /* Opcodes */
11949 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11950
11951 /* Offsets */
ab6ad5a4
EG
11952 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
11953 be16_to_cpu_n);
94a78b79
VZ
11954
11955 /* STORMs firmware */
573f2035
EG
11956 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11957 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11958 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
11959 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11960 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11961 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11962 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
11963 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11964 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11965 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11966 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
11967 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11968 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11969 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11970 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
11971 be32_to_cpu(fw_hdr->csem_pram_data.offset);
94a78b79
VZ
11972
11973 return 0;
ab6ad5a4 11974
94a78b79
VZ
11975init_offsets_alloc_err:
11976 kfree(bp->init_ops);
11977init_ops_alloc_err:
11978 kfree(bp->init_data);
11979request_firmware_exit:
11980 release_firmware(bp->firmware);
11981
11982 return rc;
11983}
11984
11985
a2fbb9ea
ET
11986static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11987 const struct pci_device_id *ent)
11988{
a2fbb9ea
ET
11989 struct net_device *dev = NULL;
11990 struct bnx2x *bp;
37f9ce62 11991 int pcie_width, pcie_speed;
25047950 11992 int rc;
a2fbb9ea 11993
a2fbb9ea 11994 /* dev zeroed in init_etherdev */
555f6c78 11995 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04
EG
11996 if (!dev) {
11997 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 11998 return -ENOMEM;
34f80b04 11999 }
a2fbb9ea 12000
a2fbb9ea
ET
12001 bp = netdev_priv(dev);
12002 bp->msglevel = debug;
12003
df4770de
EG
12004 pci_set_drvdata(pdev, dev);
12005
34f80b04 12006 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
12007 if (rc < 0) {
12008 free_netdev(dev);
12009 return rc;
12010 }
12011
34f80b04 12012 rc = bnx2x_init_bp(bp);
693fc0d1
EG
12013 if (rc)
12014 goto init_one_exit;
12015
94a78b79
VZ
12016 /* Set init arrays */
12017 rc = bnx2x_init_firmware(bp, &pdev->dev);
12018 if (rc) {
12019 printk(KERN_ERR PFX "Error loading firmware\n");
12020 goto init_one_exit;
12021 }
12022
693fc0d1 12023 rc = register_netdev(dev);
34f80b04 12024 if (rc) {
693fc0d1 12025 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
12026 goto init_one_exit;
12027 }
12028
37f9ce62 12029 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
25047950 12030 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
87942b46 12031 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
34f80b04 12032 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
37f9ce62 12033 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
25047950 12034 dev->base_addr, bp->pdev->irq);
e174961c 12035 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
c016201c 12036
a2fbb9ea 12037 return 0;
34f80b04
EG
12038
12039init_one_exit:
12040 if (bp->regview)
12041 iounmap(bp->regview);
12042
12043 if (bp->doorbells)
12044 iounmap(bp->doorbells);
12045
12046 free_netdev(dev);
12047
12048 if (atomic_read(&pdev->enable_cnt) == 1)
12049 pci_release_regions(pdev);
12050
12051 pci_disable_device(pdev);
12052 pci_set_drvdata(pdev, NULL);
12053
12054 return rc;
a2fbb9ea
ET
12055}
12056
12057static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12058{
12059 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
12060 struct bnx2x *bp;
12061
12062 if (!dev) {
228241eb
ET
12063 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12064 return;
12065 }
228241eb 12066 bp = netdev_priv(dev);
a2fbb9ea 12067
a2fbb9ea
ET
12068 unregister_netdev(dev);
12069
94a78b79
VZ
12070 kfree(bp->init_ops_offsets);
12071 kfree(bp->init_ops);
12072 kfree(bp->init_data);
12073 release_firmware(bp->firmware);
12074
a2fbb9ea
ET
12075 if (bp->regview)
12076 iounmap(bp->regview);
12077
12078 if (bp->doorbells)
12079 iounmap(bp->doorbells);
12080
12081 free_netdev(dev);
34f80b04
EG
12082
12083 if (atomic_read(&pdev->enable_cnt) == 1)
12084 pci_release_regions(pdev);
12085
a2fbb9ea
ET
12086 pci_disable_device(pdev);
12087 pci_set_drvdata(pdev, NULL);
12088}
12089
12090static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12091{
12092 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
12093 struct bnx2x *bp;
12094
34f80b04
EG
12095 if (!dev) {
12096 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12097 return -ENODEV;
12098 }
12099 bp = netdev_priv(dev);
a2fbb9ea 12100
34f80b04 12101 rtnl_lock();
a2fbb9ea 12102
34f80b04 12103 pci_save_state(pdev);
228241eb 12104
34f80b04
EG
12105 if (!netif_running(dev)) {
12106 rtnl_unlock();
12107 return 0;
12108 }
a2fbb9ea
ET
12109
12110 netif_device_detach(dev);
a2fbb9ea 12111
da5a662a 12112 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 12113
a2fbb9ea 12114 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 12115
34f80b04
EG
12116 rtnl_unlock();
12117
a2fbb9ea
ET
12118 return 0;
12119}
12120
12121static int bnx2x_resume(struct pci_dev *pdev)
12122{
12123 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 12124 struct bnx2x *bp;
a2fbb9ea
ET
12125 int rc;
12126
228241eb
ET
12127 if (!dev) {
12128 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12129 return -ENODEV;
12130 }
228241eb 12131 bp = netdev_priv(dev);
a2fbb9ea 12132
34f80b04
EG
12133 rtnl_lock();
12134
228241eb 12135 pci_restore_state(pdev);
34f80b04
EG
12136
12137 if (!netif_running(dev)) {
12138 rtnl_unlock();
12139 return 0;
12140 }
12141
a2fbb9ea
ET
12142 bnx2x_set_power_state(bp, PCI_D0);
12143 netif_device_attach(dev);
12144
da5a662a 12145 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 12146
34f80b04
EG
12147 rtnl_unlock();
12148
12149 return rc;
a2fbb9ea
ET
12150}
12151
f8ef6e44
YG
12152static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12153{
12154 int i;
12155
12156 bp->state = BNX2X_STATE_ERROR;
12157
12158 bp->rx_mode = BNX2X_RX_MODE_NONE;
12159
12160 bnx2x_netif_stop(bp, 0);
12161
12162 del_timer_sync(&bp->timer);
12163 bp->stats_state = STATS_STATE_DISABLED;
12164 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12165
12166 /* Release IRQs */
12167 bnx2x_free_irq(bp);
12168
12169 if (CHIP_IS_E1(bp)) {
12170 struct mac_configuration_cmd *config =
12171 bnx2x_sp(bp, mcast_config);
12172
8d9c5f34 12173 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
12174 CAM_INVALIDATE(config->config_table[i]);
12175 }
12176
12177 /* Free SKBs, SGEs, TPA pool and driver internals */
12178 bnx2x_free_skbs(bp);
555f6c78 12179 for_each_rx_queue(bp, i)
f8ef6e44 12180 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 12181 for_each_rx_queue(bp, i)
7cde1c8b 12182 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
12183 bnx2x_free_mem(bp);
12184
12185 bp->state = BNX2X_STATE_CLOSED;
12186
12187 netif_carrier_off(bp->dev);
12188
12189 return 0;
12190}
12191
12192static void bnx2x_eeh_recover(struct bnx2x *bp)
12193{
12194 u32 val;
12195
12196 mutex_init(&bp->port.phy_mutex);
12197
12198 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12199 bp->link_params.shmem_base = bp->common.shmem_base;
12200 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12201
12202 if (!bp->common.shmem_base ||
12203 (bp->common.shmem_base < 0xA0000) ||
12204 (bp->common.shmem_base >= 0xC0000)) {
12205 BNX2X_DEV_INFO("MCP not active\n");
12206 bp->flags |= NO_MCP_FLAG;
12207 return;
12208 }
12209
12210 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12211 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12212 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12213 BNX2X_ERR("BAD MCP validity signature\n");
12214
12215 if (!BP_NOMCP(bp)) {
12216 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12217 & DRV_MSG_SEQ_NUMBER_MASK);
12218 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12219 }
12220}
12221
493adb1f
WX
12222/**
12223 * bnx2x_io_error_detected - called when PCI error is detected
12224 * @pdev: Pointer to PCI device
12225 * @state: The current pci connection state
12226 *
12227 * This function is called after a PCI bus error affecting
12228 * this device has been detected.
12229 */
12230static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12231 pci_channel_state_t state)
12232{
12233 struct net_device *dev = pci_get_drvdata(pdev);
12234 struct bnx2x *bp = netdev_priv(dev);
12235
12236 rtnl_lock();
12237
12238 netif_device_detach(dev);
12239
07ce50e4
DN
12240 if (state == pci_channel_io_perm_failure) {
12241 rtnl_unlock();
12242 return PCI_ERS_RESULT_DISCONNECT;
12243 }
12244
493adb1f 12245 if (netif_running(dev))
f8ef6e44 12246 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
12247
12248 pci_disable_device(pdev);
12249
12250 rtnl_unlock();
12251
12252 /* Request a slot reset */
12253 return PCI_ERS_RESULT_NEED_RESET;
12254}
12255
12256/**
12257 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12258 * @pdev: Pointer to PCI device
12259 *
12260 * Restart the card from scratch, as if from a cold-boot.
12261 */
12262static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12263{
12264 struct net_device *dev = pci_get_drvdata(pdev);
12265 struct bnx2x *bp = netdev_priv(dev);
12266
12267 rtnl_lock();
12268
12269 if (pci_enable_device(pdev)) {
12270 dev_err(&pdev->dev,
12271 "Cannot re-enable PCI device after reset\n");
12272 rtnl_unlock();
12273 return PCI_ERS_RESULT_DISCONNECT;
12274 }
12275
12276 pci_set_master(pdev);
12277 pci_restore_state(pdev);
12278
12279 if (netif_running(dev))
12280 bnx2x_set_power_state(bp, PCI_D0);
12281
12282 rtnl_unlock();
12283
12284 return PCI_ERS_RESULT_RECOVERED;
12285}
12286
12287/**
12288 * bnx2x_io_resume - called when traffic can start flowing again
12289 * @pdev: Pointer to PCI device
12290 *
12291 * This callback is called when the error recovery driver tells us that
12292 * its OK to resume normal operation.
12293 */
12294static void bnx2x_io_resume(struct pci_dev *pdev)
12295{
12296 struct net_device *dev = pci_get_drvdata(pdev);
12297 struct bnx2x *bp = netdev_priv(dev);
12298
12299 rtnl_lock();
12300
f8ef6e44
YG
12301 bnx2x_eeh_recover(bp);
12302
493adb1f 12303 if (netif_running(dev))
f8ef6e44 12304 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
12305
12306 netif_device_attach(dev);
12307
12308 rtnl_unlock();
12309}
12310
12311static struct pci_error_handlers bnx2x_err_handler = {
12312 .error_detected = bnx2x_io_error_detected,
356e2385
EG
12313 .slot_reset = bnx2x_io_slot_reset,
12314 .resume = bnx2x_io_resume,
493adb1f
WX
12315};
12316
a2fbb9ea 12317static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
12318 .name = DRV_MODULE_NAME,
12319 .id_table = bnx2x_pci_tbl,
12320 .probe = bnx2x_init_one,
12321 .remove = __devexit_p(bnx2x_remove_one),
12322 .suspend = bnx2x_suspend,
12323 .resume = bnx2x_resume,
12324 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
12325};
12326
12327static int __init bnx2x_init(void)
12328{
dd21ca6d
SG
12329 int ret;
12330
938cf541
EG
12331 printk(KERN_INFO "%s", version);
12332
1cf167f2
EG
12333 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12334 if (bnx2x_wq == NULL) {
12335 printk(KERN_ERR PFX "Cannot create workqueue\n");
12336 return -ENOMEM;
12337 }
12338
dd21ca6d
SG
12339 ret = pci_register_driver(&bnx2x_pci_driver);
12340 if (ret) {
12341 printk(KERN_ERR PFX "Cannot register driver\n");
12342 destroy_workqueue(bnx2x_wq);
12343 }
12344 return ret;
a2fbb9ea
ET
12345}
12346
12347static void __exit bnx2x_cleanup(void)
12348{
12349 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
12350
12351 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
12352}
12353
12354module_init(bnx2x_init);
12355module_exit(bnx2x_cleanup);
12356
94a78b79 12357