/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
#define DRV_MODULE_VERSION	"1.48.114-1"
#define DRV_MODULE_RELDATE	"2009/07/29"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
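/*
 * Illustrative note (not from the original source): these are ordinary
 * module parameters, so a debug setup might load the driver with, e.g.:
 *
 *	modprobe bnx2x multi_mode=1 num_rx_queues=4 num_tx_queues=4 debug=0x1
 *
 * The parameter names are the real ones above; the values shown are
 * arbitrary examples.  'debug' seeds the default netif msglevel.
 */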
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
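/*
 * Illustrative usage sketch (an assumption, not a call site taken from the
 * original file): to DMA two dwords into GRC space, a caller fills the
 * pre-mapped slowpath write-back buffer and hands its DMA address to
 * bnx2x_write_dmae(), e.g.:
 *
 *	u32 *wb = bnx2x_sp(bp, wb_data[0]);
 *
 *	wb[0] = val_hi;
 *	wb[1] = val_lo;
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), reg, 2);
 *
 * This is the pattern the REG_WR_DMAE() wrapper used by bnx2x_wb_wr()
 * below relies on.
 */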
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
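/*
 * Illustrative BD-chain layout (an assumption for clarity, not taken from
 * the original comments): a TSO packet with two page frags is described by
 *
 *	start BD -> parse BD -> split-header BD -> frag BD #0 -> frag BD #1
 *
 * Only the start BD and the frag BDs carry DMA mappings, which is why the
 * loop above skips the parse BD and the optional split-header BD before
 * unmapping the remaining data BDs.
 */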
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
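/*
 * Worked example (illustrative assumption): the NUM_TX_RINGS "next page"
 * BDs can never hold packets, so they are counted as permanently used.
 * With prod = 110 and cons = 100 the ring holds 10 in-flight BDs, used
 * evaluates to 10 + NUM_TX_RINGS, and the function returns
 * tx_ring_size - 10 - NUM_TX_RINGS.  SUB_S16() keeps the subtraction
 * correct when prod has wrapped around the 16-bit index space.
 */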
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
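/*
 * Worked example (illustrative assumption that each mask element covers
 * 64 SGEs, i.e. RX_SGE_MASK_ELEM_SZ == 64): sge_mask[i] == 0 means every
 * SGE the element covers was consumed by the FW.  If first_elem == 3 and
 * the scan finds sge_mask[3] == sge_mask[4] == 0 before hitting a non-zero
 * element, both words are re-armed to RX_SGE_MASK_ELEM_ONE_MASK and the
 * producer jumps forward by delta = 128 entries in one shot.
 */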
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
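/*
 * Worked example (illustrative assumption: 4K pages and PAGES_PER_SGE == 2):
 * an 18000-byte aggregated packet with len_on_bd = 1500 leaves
 * frag_size = 16500, so pages = SGE_PAGE_ALIGN(16500) >> SGE_PAGE_SHIFT = 5,
 * walked two pages (one SGE) per iteration: frag_len comes out as
 * 8192, 8192 and finally 116 bytes.
 */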
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	} else {
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);
		rmb();
		bnx2x_tx_int(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			if (fp->is_rx_queue) {
				prefetch(fp->rx_cons_sb);
				prefetch(&fp->status_blk->u_status_block.
							status_block_index);

				napi_schedule(&bnx2x_fp(bp, fp->index, napi));

			} else {
				prefetch(fp->tx_cons_sb);
				prefetch(&fp->status_blk->c_status_block.
							status_block_index);

				bnx2x_update_fpsb_idx(fp);
				rmb();
				bnx2x_tx_int(fp);

				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_ENABLE, 1);
			}
			status &= ~mask;
		}
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
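/*
 * Typical usage sketch (illustrative, mirroring the GPIO/SPIO helpers
 * below): every read-modify-write of a shared MISC resource is bracketed
 * by the acquire/release pair, e.g.:
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... REG_RD()/REG_WR() on MISC_REG_GPIO ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */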
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->state == BNX2X_STATE_DISABLED) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
		return;
	}

	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
2214 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2216 u32 r_param = bp->link_vars.line_speed / 8;
2217 u32 fair_periodic_timeout_usec;
2220 memset(&(bp->cmng.rs_vars), 0,
2221 sizeof(struct rate_shaping_vars_per_port));
2222 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2224 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2225 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2227 /* this is the threshold below which no timer arming will occur
2228 1.25 coefficient is for the threshold to be a little bigger
2229 than the real time, to compensate for timer in-accuracy */
2230 bp->cmng.rs_vars.rs_threshold =
2231 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2233 /* resolution of fairness timer */
2234 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2235 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2236 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2238 /* this is the threshold below which we won't arm the timer anymore */
2239 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2241 /* we multiply by 1e3/8 to get bytes/msec.
2242    We don't want the credits to exceed
2243    t_fair * FAIR_MEM (the algorithm resolution) */
2244 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2245 /* since each tick is 4 usec */
2246 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
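/*
 * Worked example (illustrative, using the figures quoted in the comments
 * above): at 10G, line_speed = 10000 Mbps, so r_param = 10000 / 8 = 1250
 * bytes/usec and t_fair = 1000 usec.  With RS_PERIODIC_TIMEOUT_USEC = 100
 * this gives rs_periodic_timeout = 100 / 4 = 25 SDM ticks and
 * rs_threshold = (100 * 1250 * 5) / 4 = 156250 bytes.  The fairness timer
 * resolution is QM_ARB_BYTES / 1250 usec (QM_ARB_BYTES is defined in the
 * driver headers), again divided by 4 to convert to ticks.
 */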
2249 /* Calculates the sum of vn_min_rates.
2250    It's needed for further normalizing of the min_rates.
2251    Returns:
2252      sum of vn_min_rates.
2253        or
2254      0 - if all the min_rates are 0.
2255    In the latter case the fairness algorithm should be deactivated.
2256    If not all min_rates are zero then those that are zeroes will be set to 1.
2257  */
2258 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2261 int port = BP_PORT(bp);
2264 bp->vn_weight_sum = 0;
2265 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2266 int func = 2*vn + port;
2267 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2268 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2269 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2271 /* Skip hidden vns */
2272 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2275 /* If min rate is zero - set it to 1 */
2277 vn_min_rate = DEF_MIN_RATE;
2281 bp->vn_weight_sum += vn_min_rate;
2284 /* ... only if all min rates are zeros - disable fairness */
2286 bp->vn_weight_sum = 0;
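/*
 * Illustrative example (hypothetical configuration): if three visible vns
 * carry min BW fields of 10, 40 and 0, the *100 scaling above yields
 * vn_min_rate values of 1000, 4000 and DEF_MIN_RATE (the zero entry is
 * promoted), so vn_weight_sum = 5000 + DEF_MIN_RATE.  Only when every
 * visible vn reports 0 is vn_weight_sum forced back to 0, which turns the
 * fairness algorithm off.
 */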
2289 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2291 struct rate_shaping_vars_per_vn m_rs_vn;
2292 struct fairness_vars_per_vn m_fair_vn;
2293 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2294 u16 vn_min_rate, vn_max_rate;
2297 /* If function is hidden - set min and max to zeroes */
2298 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2303 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2304 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2305 /* If fairness is enabled (not all min rates are zeroes) and
2306 if current min rate is zero - set it to 1.
2307 This is a requirement of the algorithm. */
2308 if (bp->vn_weight_sum && (vn_min_rate == 0))
2309 vn_min_rate = DEF_MIN_RATE;
2310 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2311 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2315 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2316 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2318 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2319 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2321 /* global vn counter - maximal Mbps for this vn */
2322 m_rs_vn.vn_counter.rate = vn_max_rate;
2324 /* quota - number of bytes transmitted in this period */
2325 m_rs_vn.vn_counter.quota =
2326 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2328 if (bp->vn_weight_sum) {
2329 /* credit for each period of the fairness algorithm:
2330    number of bytes in T_FAIR (the vns share the port rate).
2331    vn_weight_sum should not be larger than 10000, thus
2332    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2333    than zero */
2334 m_fair_vn.vn_credit_delta =
2335 max((u32)(vn_min_rate * (T_FAIR_COEF /
2336 (8 * bp->vn_weight_sum))),
2337 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2338 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2339 m_fair_vn.vn_credit_delta);
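/*
 * Numeric sketch (assuming T_FAIR_COEF equals 10000 Mbps * 1000 usec, as
 * implied by the t_fair comment in bnx2x_init_port_minmax()): with
 * vn_min_rate = 1000 and vn_weight_sum = 10000, the first max() operand is
 * 1000 * (T_FAIR_COEF / 80000) bytes of credit per fairness period, while
 * the second operand, 2 * fair_threshold, keeps the credit from shrinking
 * below a usable minimum for very small min rates.
 */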
2342 /* Store it to internal memory */
2343 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2344 REG_WR(bp, BAR_XSTRORM_INTMEM +
2345 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2346 ((u32 *)(&m_rs_vn))[i]);
2348 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2349 REG_WR(bp, BAR_XSTRORM_INTMEM +
2350 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2351 ((u32 *)(&m_fair_vn))[i]);
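/*
 * Note: both loops above copy a host-side struct into STORM internal
 * memory one 32-bit word at a time (REG_WR() transfers a single dword
 * through the BAR window), so the sizeof()/4 word counts rely on the
 * firmware structs being whole multiples of 4 bytes.
 */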
2355 /* This function is called upon link interrupt */
2356 static void bnx2x_link_attn(struct bnx2x *bp)
2358 /* Make sure that we are synced with the current statistics */
2359 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2361 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2363 if (bp->link_vars.link_up) {
2365 /* dropless flow control */
2366 if (CHIP_IS_E1H(bp)) {
2367 int port = BP_PORT(bp);
2368 u32 pause_enabled = 0;
2370 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2373 REG_WR(bp, BAR_USTRORM_INTMEM +
2374 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2378 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2379 struct host_port_stats *pstats;
2381 pstats = bnx2x_sp(bp, port_stats);
2382 /* reset old bmac stats */
2383 memset(&(pstats->mac_stx[0]), 0,
2384 sizeof(struct mac_stx));
2386 if ((bp->state == BNX2X_STATE_OPEN) ||
2387 (bp->state == BNX2X_STATE_DISABLED))
2388 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2391 /* indicate link status */
2392 bnx2x_link_report(bp);
2395 int port = BP_PORT(bp);
2399 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2400 if (vn == BP_E1HVN(bp))
2403 func = ((vn << 1) | port);
2405 /* Set the attention towards other drivers
2406    on the same port */
2407 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2408 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2411 if (bp->link_vars.link_up) {
2414 /* Init rate shaping and fairness contexts */
2415 bnx2x_init_port_minmax(bp);
2417 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2418 bnx2x_init_vn_minmax(bp, 2*vn + port);
2420 /* Store it to internal memory */
2422 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2423 REG_WR(bp, BAR_XSTRORM_INTMEM +
2424 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2425 ((u32 *)(&bp->cmng))[i]);
2430 static void bnx2x__link_status_update(struct bnx2x *bp)
2432 int func = BP_FUNC(bp);
2434 if (bp->state != BNX2X_STATE_OPEN)
2437 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2439 if (bp->link_vars.link_up)
2440 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2442 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2444 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2445 bnx2x_calc_vn_weight_sum(bp);
2447 /* indicate link status */
2448 bnx2x_link_report(bp);
2451 static void bnx2x_pmf_update(struct bnx2x *bp)
2453 int port = BP_PORT(bp);
2457 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2459 /* enable nig attention */
2460 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2461 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2462 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2464 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2472 * General service functions
2475 /* send the MCP a request, block until there is a reply */
2476 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2478 int func = BP_FUNC(bp);
2479 u32 seq = ++bp->fw_seq;
2482 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2484 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2485 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2488 /* let the FW do its magic ... */
2491 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2493 /* Give the FW up to 2 seconds (200*10ms) */
2494 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2496 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2497 cnt*delay, rc, seq);
2499 /* is this a reply to our command? */
2500 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2501 rc &= FW_MSG_CODE_MASK;
2504 BNX2X_ERR("FW failed to respond!\n");
2512 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2513 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2514 static void bnx2x_set_rx_mode(struct net_device *dev);
2516 static void bnx2x_e1h_disable(struct bnx2x *bp)
2518 int port = BP_PORT(bp);
2521 bp->rx_mode = BNX2X_RX_MODE_NONE;
2522 bnx2x_set_storm_rx_mode(bp);
2524 netif_tx_disable(bp->dev);
2525 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2527 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2529 bnx2x_set_mac_addr_e1h(bp, 0);
2531 for (i = 0; i < MC_HASH_SIZE; i++)
2532 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2534 netif_carrier_off(bp->dev);
2537 static void bnx2x_e1h_enable(struct bnx2x *bp)
2539 int port = BP_PORT(bp);
2541 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2543 bnx2x_set_mac_addr_e1h(bp, 1);
2545 /* Tx queues should only be re-enabled */
2546 netif_tx_wake_all_queues(bp->dev);
2548 /* Initialize the receive filter. */
2549 bnx2x_set_rx_mode(bp->dev);
2552 static void bnx2x_update_min_max(struct bnx2x *bp)
2554 int port = BP_PORT(bp);
2557 /* Init rate shaping and fairness contexts */
2558 bnx2x_init_port_minmax(bp);
2560 bnx2x_calc_vn_weight_sum(bp);
2562 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2563 bnx2x_init_vn_minmax(bp, 2*vn + port);
2568 /* Set the attention towards other drivers on the same port */
2569 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2570 if (vn == BP_E1HVN(bp))
2573 func = ((vn << 1) | port);
2574 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2575 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2578 /* Store it to internal memory */
2579 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2580 REG_WR(bp, BAR_XSTRORM_INTMEM +
2581 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2582 ((u32 *)(&bp->cmng))[i]);
2586 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2588 int func = BP_FUNC(bp);
2590 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2591 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2593 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2595 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2596 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2597 bp->state = BNX2X_STATE_DISABLED;
2599 bnx2x_e1h_disable(bp);
2601 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2602 bp->state = BNX2X_STATE_OPEN;
2604 bnx2x_e1h_enable(bp);
2606 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2608 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2610 bnx2x_update_min_max(bp);
2611 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2614 /* Report results to MCP */
2616 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2618 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2621 /* the slow path queue is odd since completions arrive on the fastpath ring */
2622 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2623 u32 data_hi, u32 data_lo, int common)
2625 int func = BP_FUNC(bp);
2627 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2628 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2629 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2630 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2631 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2633 #ifdef BNX2X_STOP_ON_ERROR
2634 if (unlikely(bp->panic))
2638 spin_lock_bh(&bp->spq_lock);
2640 if (!bp->spq_left) {
2641 BNX2X_ERR("BUG! SPQ ring full!\n");
2642 spin_unlock_bh(&bp->spq_lock);
2647 /* CID needs the port number to be encoded in it */
2648 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2649 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2651 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2653 bp->spq_prod_bd->hdr.type |=
2654 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2656 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2657 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2661 if (bp->spq_prod_bd == bp->spq_last_bd) {
2662 bp->spq_prod_bd = bp->spq;
2663 bp->spq_prod_idx = 0;
2664 DP(NETIF_MSG_TIMER, "end of spq\n");
2671 /* Make sure that BD data is updated before writing the producer */
2674 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2679 spin_unlock_bh(&bp->spq_lock);
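/*
 * Usage sketch, taken from the statistics code later in this file:
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *		      ((u32 *)&ramrod_data)[1], ((u32 *)&ramrod_data)[0], 0);
 *
 * data_hi/data_lo carry an opaque 64-bit payload, and common = 0 marks a
 * per-function ramrod, leaving the COMMON_RAMROD bit in hdr.type clear.
 */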
2683 /* acquire split MCP access lock register */
2684 static int bnx2x_acquire_alr(struct bnx2x *bp)
2691 for (j = 0; j < i*10; j++) {
2693 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2694 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2695 if (val & (1L << 31))
2700 if (!(val & (1L << 31))) {
2701 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2708 /* release split MCP access lock register */
2709 static void bnx2x_release_alr(struct bnx2x *bp)
2713 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2716 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2718 struct host_def_status_block *def_sb = bp->def_status_blk;
2721 barrier(); /* status block is written to by the chip */
2722 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2723 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2726 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2727 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2730 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2731 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2734 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2735 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2738 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2739 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2746 * slow path service functions
2749 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2751 int port = BP_PORT(bp);
2752 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2753 COMMAND_REG_ATTN_BITS_SET);
2754 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2755 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2756 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2757 NIG_REG_MASK_INTERRUPT_PORT0;
2761 if (bp->attn_state & asserted)
2762 BNX2X_ERR("IGU ERROR\n");
2764 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2765 aeu_mask = REG_RD(bp, aeu_addr);
2767 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2768 aeu_mask, asserted);
2769 aeu_mask &= ~(asserted & 0xff);
2770 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2772 REG_WR(bp, aeu_addr, aeu_mask);
2773 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2775 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2776 bp->attn_state |= asserted;
2777 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2779 if (asserted & ATTN_HARD_WIRED_MASK) {
2780 if (asserted & ATTN_NIG_FOR_FUNC) {
2782 bnx2x_acquire_phy_lock(bp);
2784 /* save nig interrupt mask */
2785 nig_mask = REG_RD(bp, nig_int_mask_addr);
2786 REG_WR(bp, nig_int_mask_addr, 0);
2788 bnx2x_link_attn(bp);
2790 /* handle unicore attn? */
2792 if (asserted & ATTN_SW_TIMER_4_FUNC)
2793 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2795 if (asserted & GPIO_2_FUNC)
2796 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2798 if (asserted & GPIO_3_FUNC)
2799 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2801 if (asserted & GPIO_4_FUNC)
2802 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2805 if (asserted & ATTN_GENERAL_ATTN_1) {
2806 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2807 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2809 if (asserted & ATTN_GENERAL_ATTN_2) {
2810 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2811 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2813 if (asserted & ATTN_GENERAL_ATTN_3) {
2814 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2815 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2818 if (asserted & ATTN_GENERAL_ATTN_4) {
2819 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2820 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2822 if (asserted & ATTN_GENERAL_ATTN_5) {
2823 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2824 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2826 if (asserted & ATTN_GENERAL_ATTN_6) {
2827 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2828 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2832 } /* if hardwired */
2834 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2836 REG_WR(bp, hc_addr, asserted);
2838 /* now set back the mask */
2839 if (asserted & ATTN_NIG_FOR_FUNC) {
2840 REG_WR(bp, nig_int_mask_addr, nig_mask);
2841 bnx2x_release_phy_lock(bp);
2845 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2847 int port = BP_PORT(bp);
2849 /* mark the failure */
2850 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2851 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2852 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2853 bp->link_params.ext_phy_config);
2855 /* log the failure */
2856 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2857 " the driver to shutdown the card to prevent permanent"
2858 " damage. Please contact Dell Support for assistance\n",
2861 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2863 int port = BP_PORT(bp);
2865 u32 val, swap_val, swap_override;
2867 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2868 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2870 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2872 val = REG_RD(bp, reg_offset);
2873 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2874 REG_WR(bp, reg_offset, val);
2876 BNX2X_ERR("SPIO5 hw attention\n");
2878 /* Fan failure attention */
2879 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2880 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2881 /* Low power mode is controlled by GPIO 2 */
2882 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2883 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2884 /* The PHY reset is controlled by GPIO 1 */
2885 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2886 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2889 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2890 /* The PHY reset is controlled by GPIO 1 */
2891 /* fake the port number to cancel the swap done in
2892    set_gpio() */
2893 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2894 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2895 port = (swap_val && swap_override) ^ 1;
2896 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2897 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2903 bnx2x_fan_failure(bp);
2906 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2907 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2908 bnx2x_acquire_phy_lock(bp);
2909 bnx2x_handle_module_detect_int(&bp->link_params);
2910 bnx2x_release_phy_lock(bp);
2913 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2915 val = REG_RD(bp, reg_offset);
2916 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2917 REG_WR(bp, reg_offset, val);
2919 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2920 (attn & HW_INTERRUT_ASSERT_SET_0));
2925 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2929 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2931 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2932 BNX2X_ERR("DB hw attention 0x%x\n", val);
2933 /* DORQ discard attention */
2935 BNX2X_ERR("FATAL error from DORQ\n");
2938 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2940 int port = BP_PORT(bp);
2943 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2944 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2946 val = REG_RD(bp, reg_offset);
2947 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2948 REG_WR(bp, reg_offset, val);
2950 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2951 (attn & HW_INTERRUT_ASSERT_SET_1));
2956 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2960 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2962 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2963 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2964 /* CFC error attention */
2966 BNX2X_ERR("FATAL error from CFC\n");
2969 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2971 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2972 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2973 /* RQ_USDMDP_FIFO_OVERFLOW */
2975 BNX2X_ERR("FATAL error from PXP\n");
2978 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2980 int port = BP_PORT(bp);
2983 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2984 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2986 val = REG_RD(bp, reg_offset);
2987 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2988 REG_WR(bp, reg_offset, val);
2990 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2991 (attn & HW_INTERRUT_ASSERT_SET_2));
2996 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3000 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3002 if (attn & BNX2X_PMF_LINK_ASSERT) {
3003 int func = BP_FUNC(bp);
3005 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3006 val = SHMEM_RD(bp, func_mb[func].drv_status);
3007 if (val & DRV_STATUS_DCC_EVENT_MASK)
3009 (val & DRV_STATUS_DCC_EVENT_MASK));
3010 bnx2x__link_status_update(bp);
3011 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3012 bnx2x_pmf_update(bp);
3014 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3016 BNX2X_ERR("MC assert!\n");
3017 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3018 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3019 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3020 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3023 } else if (attn & BNX2X_MCP_ASSERT) {
3025 BNX2X_ERR("MCP assert!\n");
3026 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3030 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3033 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3034 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3035 if (attn & BNX2X_GRC_TIMEOUT) {
3036 val = CHIP_IS_E1H(bp) ?
3037 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3038 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3040 if (attn & BNX2X_GRC_RSV) {
3041 val = CHIP_IS_E1H(bp) ?
3042 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3043 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3045 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3049 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3051 struct attn_route attn;
3052 struct attn_route group_mask;
3053 int port = BP_PORT(bp);
3059 /* need to take the HW lock because the MCP or the other port might
3060    also try to handle this event */
3061 bnx2x_acquire_alr(bp);
3063 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3064 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3065 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3066 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3067 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3068 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3070 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3071 if (deasserted & (1 << index)) {
3072 group_mask = bp->attn_group[index];
3074 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3075 index, group_mask.sig[0], group_mask.sig[1],
3076 group_mask.sig[2], group_mask.sig[3]);
3078 bnx2x_attn_int_deasserted3(bp,
3079 attn.sig[3] & group_mask.sig[3]);
3080 bnx2x_attn_int_deasserted1(bp,
3081 attn.sig[1] & group_mask.sig[1]);
3082 bnx2x_attn_int_deasserted2(bp,
3083 attn.sig[2] & group_mask.sig[2]);
3084 bnx2x_attn_int_deasserted0(bp,
3085 attn.sig[0] & group_mask.sig[0]);
3087 if ((attn.sig[0] & group_mask.sig[0] &
3088 HW_PRTY_ASSERT_SET_0) ||
3089 (attn.sig[1] & group_mask.sig[1] &
3090 HW_PRTY_ASSERT_SET_1) ||
3091 (attn.sig[2] & group_mask.sig[2] &
3092 HW_PRTY_ASSERT_SET_2))
3093 BNX2X_ERR("FATAL HW block parity attention\n");
3097 bnx2x_release_alr(bp);
3099 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3102 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3104 REG_WR(bp, reg_addr, val);
3106 if (~bp->attn_state & deasserted)
3107 BNX2X_ERR("IGU ERROR\n");
3109 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3110 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3112 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3113 aeu_mask = REG_RD(bp, reg_addr);
3115 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3116 aeu_mask, deasserted);
3117 aeu_mask |= (deasserted & 0xff);
3118 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3120 REG_WR(bp, reg_addr, aeu_mask);
3121 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3123 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3124 bp->attn_state &= ~deasserted;
3125 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3128 static void bnx2x_attn_int(struct bnx2x *bp)
3130 /* read local copy of bits */
3131 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3133 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3135 u32 attn_state = bp->attn_state;
3137 /* look for changed bits */
3138 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3139 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3142 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3143 attn_bits, attn_ack, asserted, deasserted);
3145 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3146 BNX2X_ERR("BAD attention state\n");
3148 /* handle bits that were raised */
3150 bnx2x_attn_int_asserted(bp, asserted);
3153 bnx2x_attn_int_deasserted(bp, deasserted);
3156 static void bnx2x_sp_task(struct work_struct *work)
3158 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3162 /* Return here if interrupt is disabled */
3163 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3164 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3168 status = bnx2x_update_dsb_idx(bp);
3169 /* if (status == 0) */
3170 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3172 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3178 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3180 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3182 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3184 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3186 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3191 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3193 struct net_device *dev = dev_instance;
3194 struct bnx2x *bp = netdev_priv(dev);
3196 /* Return here if interrupt is disabled */
3197 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3198 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3202 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3204 #ifdef BNX2X_STOP_ON_ERROR
3205 if (unlikely(bp->panic))
3209 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3214 /* end of slow path */
3218 /****************************************************************************
3219 * Macros
3220 ****************************************************************************/
3222 /* sum[hi:lo] += add[hi:lo] */
3223 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3224 do { \
3225 s_lo += a_lo; \
3226 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3227 } while (0)
3229 /* difference = minuend - subtrahend */
3230 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3231 do { \
3232 if (m_lo < s_lo) { \
3233 /* underflow */ \
3234 d_hi = m_hi - s_hi; \
3235 if (d_hi > 0) { \
3236 /* we can 'loan' 1 */ \
3237 d_hi--; \
3238 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3239 } else { \
3240 /* m_hi <= s_hi */ \
3241 d_hi = 0; \
3242 d_lo = 0; \
3243 } \
3244 } else { \
3245 /* m_lo >= s_lo */ \
3246 if (m_hi < s_hi) { \
3247 d_hi = 0; \
3248 d_lo = 0; \
3249 } else { \
3250 /* m_hi >= s_hi */ \
3251 d_hi = m_hi - s_hi; \
3252 d_lo = m_lo - s_lo; \
3253 } \
3254 } \
3255 } while (0)
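/*
 * Example of the split-64 arithmetic (illustrative): for
 * s = 0x00000001ffffffff and a = 0x0000000000000001, ADD_64 wraps s_lo to
 * 0, the (s_lo < a_lo) test detects the carry, and s_hi becomes 2.
 * DIFF_64 mirrors this with an explicit borrow and, rather than
 * underflowing, clamps the result to 0 whenever the subtrahend is larger
 * than the minuend.
 */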
3257 #define UPDATE_STAT64(s, t) \
3258 do { \
3259 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3260 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3261 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3262 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3263 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3264 pstats->mac_stx[1].t##_lo, diff.lo); \
3265 } while (0)
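/*
 * mac_stx[0] holds the most recent raw MAC snapshot while mac_stx[1]
 * accumulates totals: UPDATE_STAT64 takes the delta against the previous
 * snapshot, stores the new raw value, then folds the delta into the
 * accumulator, so the running totals survive the snapshot reset done in
 * bnx2x_link_attn() when a BMAC link comes up.
 */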
3267 #define UPDATE_STAT64_NIG(s, t) \
3268 do { \
3269 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3270 diff.lo, new->s##_lo, old->s##_lo); \
3271 ADD_64(estats->t##_hi, diff.hi, \
3272 estats->t##_lo, diff.lo); \
3273 } while (0)
3275 /* sum[hi:lo] += add */
3276 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3277 do { \
3278 s_lo += a; \
3279 s_hi += (s_lo < a) ? 1 : 0; \
3280 } while (0)
3282 #define UPDATE_EXTEND_STAT(s) \
3283 do { \
3284 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3285 pstats->mac_stx[1].s##_lo, \
3286 new->s); \
3287 } while (0)
3289 #define UPDATE_EXTEND_TSTAT(s, t) \
3290 do { \
3291 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3292 old_tclient->s = tclient->s; \
3293 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3294 } while (0)
3296 #define UPDATE_EXTEND_USTAT(s, t) \
3297 do { \
3298 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3299 old_uclient->s = uclient->s; \
3300 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3301 } while (0)
3303 #define UPDATE_EXTEND_XSTAT(s, t) \
3304 do { \
3305 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3306 old_xclient->s = xclient->s; \
3307 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3308 } while (0)
3310 /* minuend -= subtrahend */
3311 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3312 do { \
3313 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3314 } while (0)
3316 /* minuend[hi:lo] -= subtrahend */
3317 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3318 do { \
3319 SUB_64(m_hi, 0, m_lo, s); \
3320 } while (0)
3322 #define SUB_EXTEND_USTAT(s, t) \
3323 do { \
3324 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3325 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3326 } while (0)
3329 * General service functions
3332 static inline long bnx2x_hilo(u32 *hiref)
3333 {
3334 u32 lo = *(hiref + 1);
3335 #if (BITS_PER_LONG == 64)
3336 u32 hi = *hiref;
3338 return HILO_U64(hi, lo);
3339 #else
3340 return lo;
3341 #endif
3342 }
3345 * Init service functions
3348 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3350 if (!bp->stats_pending) {
3351 struct eth_query_ramrod_data ramrod_data = {0};
3354 ramrod_data.drv_counter = bp->stats_counter++;
3355 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3356 for_each_queue(bp, i)
3357 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3359 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3360 ((u32 *)&ramrod_data)[1],
3361 ((u32 *)&ramrod_data)[0], 0);
3363 /* stats ramrod has its own slot on the spq */
3365 bp->stats_pending = 1;
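/*
 * Note the word order: ramrod_data is handed to bnx2x_sp_post() with word
 * 1 as data_hi and word 0 as data_lo, matching the hi/lo layout of the SPQ
 * BD.  stats_pending then stays set until the storms echo the incremented
 * drv_counter back, which bnx2x_storm_stats_update() checks per client.
 */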
3370 static void bnx2x_stats_init(struct bnx2x *bp)
3372 int port = BP_PORT(bp);
3375 bp->stats_pending = 0;
3376 bp->executer_idx = 0;
3377 bp->stats_counter = 0;
3381 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3383 bp->port.port_stx = 0;
3384 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3386 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3387 bp->port.old_nig_stats.brb_discard =
3388 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3389 bp->port.old_nig_stats.brb_truncate =
3390 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3391 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3392 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3393 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3394 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3396 /* function stats */
3397 for_each_queue(bp, i) {
3398 struct bnx2x_fastpath *fp = &bp->fp[i];
3400 memset(&fp->old_tclient, 0,
3401 sizeof(struct tstorm_per_client_stats));
3402 memset(&fp->old_uclient, 0,
3403 sizeof(struct ustorm_per_client_stats));
3404 memset(&fp->old_xclient, 0,
3405 sizeof(struct xstorm_per_client_stats));
3406 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3409 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3410 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3412 bp->stats_state = STATS_STATE_DISABLED;
3413 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3414 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3417 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3419 struct dmae_command *dmae = &bp->stats_dmae;
3420 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3422 *stats_comp = DMAE_COMP_VAL;
3423 if (CHIP_REV_IS_SLOW(bp))
3427 if (bp->executer_idx) {
3428 int loader_idx = PMF_DMAE_C(bp);
3430 memset(dmae, 0, sizeof(struct dmae_command));
3432 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3433 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3434 DMAE_CMD_DST_RESET |
3436 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3438 DMAE_CMD_ENDIANITY_DW_SWAP |
3440 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3442 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3443 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3444 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3445 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3446 sizeof(struct dmae_command) *
3447 (loader_idx + 1)) >> 2;
3448 dmae->dst_addr_hi = 0;
3449 dmae->len = sizeof(struct dmae_command) >> 2;
3452 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3453 dmae->comp_addr_hi = 0;
3457 bnx2x_post_dmae(bp, dmae, loader_idx);
3459 } else if (bp->func_stx) {
3461 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3465 static int bnx2x_stats_comp(struct bnx2x *bp)
3467 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3471 while (*stats_comp != DMAE_COMP_VAL) {
3473 BNX2X_ERR("timeout waiting for stats finished\n");
3483 * Statistics service functions
3486 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3488 struct dmae_command *dmae;
3490 int loader_idx = PMF_DMAE_C(bp);
3491 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3494 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3495 BNX2X_ERR("BUG!\n");
3499 bp->executer_idx = 0;
3501 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3503 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3505 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3507 DMAE_CMD_ENDIANITY_DW_SWAP |
3509 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3510 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3512 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3513 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3514 dmae->src_addr_lo = bp->port.port_stx >> 2;
3515 dmae->src_addr_hi = 0;
3516 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3517 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3518 dmae->len = DMAE_LEN32_RD_MAX;
3519 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3520 dmae->comp_addr_hi = 0;
3523 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3524 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3525 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3526 dmae->src_addr_hi = 0;
3527 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3528 DMAE_LEN32_RD_MAX * 4);
3529 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3530 DMAE_LEN32_RD_MAX * 4);
3531 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3532 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3533 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3534 dmae->comp_val = DMAE_COMP_VAL;
3537 bnx2x_hw_stats_post(bp);
3538 bnx2x_stats_comp(bp);
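/*
 * The port statistics block is wider than one DMAE transfer, so the read
 * is split: the first command copies DMAE_LEN32_RD_MAX dwords and
 * completes into the GRC loader, while the second copies the remainder and
 * signals PCI completion by writing DMAE_COMP_VAL into stats_comp, the
 * value bnx2x_stats_comp() polls for.
 */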
3541 static void bnx2x_port_stats_init(struct bnx2x *bp)
3543 struct dmae_command *dmae;
3544 int port = BP_PORT(bp);
3545 int vn = BP_E1HVN(bp);
3547 int loader_idx = PMF_DMAE_C(bp);
3549 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3552 if (!bp->link_vars.link_up || !bp->port.pmf) {
3553 BNX2X_ERR("BUG!\n");
3557 bp->executer_idx = 0;
3560 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3561 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3562 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3564 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3566 DMAE_CMD_ENDIANITY_DW_SWAP |
3568 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3569 (vn << DMAE_CMD_E1HVN_SHIFT));
3571 if (bp->port.port_stx) {
3573 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3574 dmae->opcode = opcode;
3575 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3576 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3577 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3578 dmae->dst_addr_hi = 0;
3579 dmae->len = sizeof(struct host_port_stats) >> 2;
3580 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3581 dmae->comp_addr_hi = 0;
3587 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3588 dmae->opcode = opcode;
3589 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3590 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3591 dmae->dst_addr_lo = bp->func_stx >> 2;
3592 dmae->dst_addr_hi = 0;
3593 dmae->len = sizeof(struct host_func_stats) >> 2;
3594 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3595 dmae->comp_addr_hi = 0;
3600 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3601 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3602 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3604 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3606 DMAE_CMD_ENDIANITY_DW_SWAP |
3608 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3609 (vn << DMAE_CMD_E1HVN_SHIFT));
3611 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3613 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3614 NIG_REG_INGRESS_BMAC0_MEM);
3616 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3617 BIGMAC_REGISTER_TX_STAT_GTBYT */
3618 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3619 dmae->opcode = opcode;
3620 dmae->src_addr_lo = (mac_addr +
3621 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3622 dmae->src_addr_hi = 0;
3623 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3624 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3625 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3626 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3627 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3628 dmae->comp_addr_hi = 0;
3631 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3632 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3633 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3634 dmae->opcode = opcode;
3635 dmae->src_addr_lo = (mac_addr +
3636 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3637 dmae->src_addr_hi = 0;
3638 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3639 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3640 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3641 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3642 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3643 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3644 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3645 dmae->comp_addr_hi = 0;
3648 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3650 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3652 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3653 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3654 dmae->opcode = opcode;
3655 dmae->src_addr_lo = (mac_addr +
3656 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3657 dmae->src_addr_hi = 0;
3658 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3659 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3660 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3661 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3662 dmae->comp_addr_hi = 0;
3665 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3666 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3667 dmae->opcode = opcode;
3668 dmae->src_addr_lo = (mac_addr +
3669 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3670 dmae->src_addr_hi = 0;
3671 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3672 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3673 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3674 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3676 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3677 dmae->comp_addr_hi = 0;
3680 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3681 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3682 dmae->opcode = opcode;
3683 dmae->src_addr_lo = (mac_addr +
3684 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3685 dmae->src_addr_hi = 0;
3686 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3687 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3688 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3689 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3690 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3691 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3692 dmae->comp_addr_hi = 0;
3697 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3698 dmae->opcode = opcode;
3699 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3700 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3701 dmae->src_addr_hi = 0;
3702 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3703 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3704 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3705 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3706 dmae->comp_addr_hi = 0;
3709 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3710 dmae->opcode = opcode;
3711 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3712 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3713 dmae->src_addr_hi = 0;
3714 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3715 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3716 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3717 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3718 dmae->len = (2*sizeof(u32)) >> 2;
3719 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3720 dmae->comp_addr_hi = 0;
3723 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3724 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3725 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3726 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3728 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3730 DMAE_CMD_ENDIANITY_DW_SWAP |
3732 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3733 (vn << DMAE_CMD_E1HVN_SHIFT));
3734 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3735 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3736 dmae->src_addr_hi = 0;
3737 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3738 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3739 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3740 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3741 dmae->len = (2*sizeof(u32)) >> 2;
3742 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3743 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3744 dmae->comp_val = DMAE_COMP_VAL;
3749 static void bnx2x_func_stats_init(struct bnx2x *bp)
3751 struct dmae_command *dmae = &bp->stats_dmae;
3752 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3755 if (!bp->func_stx) {
3756 BNX2X_ERR("BUG!\n");
3760 bp->executer_idx = 0;
3761 memset(dmae, 0, sizeof(struct dmae_command));
3763 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3764 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3765 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3767 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3769 DMAE_CMD_ENDIANITY_DW_SWAP |
3771 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3772 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3773 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3774 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3775 dmae->dst_addr_lo = bp->func_stx >> 2;
3776 dmae->dst_addr_hi = 0;
3777 dmae->len = sizeof(struct host_func_stats) >> 2;
3778 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3779 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3780 dmae->comp_val = DMAE_COMP_VAL;
3785 static void bnx2x_stats_start(struct bnx2x *bp)
3788 bnx2x_port_stats_init(bp);
3790 else if (bp->func_stx)
3791 bnx2x_func_stats_init(bp);
3793 bnx2x_hw_stats_post(bp);
3794 bnx2x_storm_stats_post(bp);
3797 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3799 bnx2x_stats_comp(bp);
3800 bnx2x_stats_pmf_update(bp);
3801 bnx2x_stats_start(bp);
3804 static void bnx2x_stats_restart(struct bnx2x *bp)
3806 bnx2x_stats_comp(bp);
3807 bnx2x_stats_start(bp);
3810 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3812 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3813 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3814 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3820 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3821 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3822 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3823 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3824 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3825 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3826 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3827 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3828 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3829 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3830 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3831 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3832 UPDATE_STAT64(tx_stat_gt127,
3833 tx_stat_etherstatspkts65octetsto127octets);
3834 UPDATE_STAT64(tx_stat_gt255,
3835 tx_stat_etherstatspkts128octetsto255octets);
3836 UPDATE_STAT64(tx_stat_gt511,
3837 tx_stat_etherstatspkts256octetsto511octets);
3838 UPDATE_STAT64(tx_stat_gt1023,
3839 tx_stat_etherstatspkts512octetsto1023octets);
3840 UPDATE_STAT64(tx_stat_gt1518,
3841 tx_stat_etherstatspkts1024octetsto1522octets);
3842 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3843 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3844 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3845 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3846 UPDATE_STAT64(tx_stat_gterr,
3847 tx_stat_dot3statsinternalmactransmiterrors);
3848 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3850 estats->pause_frames_received_hi =
3851 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3852 estats->pause_frames_received_lo =
3853 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3855 estats->pause_frames_sent_hi =
3856 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3857 estats->pause_frames_sent_lo =
3858 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3861 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3863 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3864 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3865 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3867 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3868 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3869 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3870 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3871 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3872 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3873 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3874 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3875 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3876 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3877 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3878 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3879 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3880 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3881 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3882 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3883 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3884 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3885 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3886 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3887 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3888 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3889 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3890 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3891 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3892 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3893 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3894 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3895 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3896 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3897 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3899 estats->pause_frames_received_hi =
3900 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3901 estats->pause_frames_received_lo =
3902 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3903 ADD_64(estats->pause_frames_received_hi,
3904 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3905 estats->pause_frames_received_lo,
3906 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3908 estats->pause_frames_sent_hi =
3909 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3910 estats->pause_frames_sent_lo =
3911 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3912 ADD_64(estats->pause_frames_sent_hi,
3913 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3914 estats->pause_frames_sent_lo,
3915 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3918 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3920 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3921 struct nig_stats *old = &(bp->port.old_nig_stats);
3922 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3923 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3930 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3931 bnx2x_bmac_stats_update(bp);
3933 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3934 bnx2x_emac_stats_update(bp);
3936 else { /* unreached */
3937 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3941 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3942 new->brb_discard - old->brb_discard);
3943 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3944 new->brb_truncate - old->brb_truncate);
3946 UPDATE_STAT64_NIG(egress_mac_pkt0,
3947 etherstatspkts1024octetsto1522octets);
3948 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3950 memcpy(old, new, sizeof(struct nig_stats));
3952 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3953 sizeof(struct mac_stx));
3954 estats->brb_drop_hi = pstats->brb_drop_hi;
3955 estats->brb_drop_lo = pstats->brb_drop_lo;
3957 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
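/*
 * host_port_stats_start/_end look like a torn-snapshot guard: the update
 * bumps _end first and copies it into _start once the block is complete,
 * so a reader that observes start != end can tell it raced with an update
 * in progress (an interpretation based on the pattern here and on the
 * matching host_func_stats pair in bnx2x_storm_stats_update() below).
 */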
3959 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3960 if (nig_timer_max != estats->nig_timer_max) {
3961 estats->nig_timer_max = nig_timer_max;
3962 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3968 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3970 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3971 struct tstorm_per_port_stats *tport =
3972 &stats->tstorm_common.port_statistics;
3973 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3974 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3977 memset(&(fstats->total_bytes_received_hi), 0,
3978 sizeof(struct host_func_stats) - 2*sizeof(u32));
3979 estats->error_bytes_received_hi = 0;
3980 estats->error_bytes_received_lo = 0;
3981 estats->etherstatsoverrsizepkts_hi = 0;
3982 estats->etherstatsoverrsizepkts_lo = 0;
3983 estats->no_buff_discard_hi = 0;
3984 estats->no_buff_discard_lo = 0;
3986 for_each_rx_queue(bp, i) {
3987 struct bnx2x_fastpath *fp = &bp->fp[i];
3988 int cl_id = fp->cl_id;
3989 struct tstorm_per_client_stats *tclient =
3990 &stats->tstorm_common.client_statistics[cl_id];
3991 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3992 struct ustorm_per_client_stats *uclient =
3993 &stats->ustorm_common.client_statistics[cl_id];
3994 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3995 struct xstorm_per_client_stats *xclient =
3996 &stats->xstorm_common.client_statistics[cl_id];
3997 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3998 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4001 /* are storm stats valid? */
4002 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4003 bp->stats_counter) {
4004 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4005 " xstorm counter (%d) != stats_counter (%d)\n",
4006 i, xclient->stats_counter, bp->stats_counter);
4009 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4010 bp->stats_counter) {
4011 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4012 " tstorm counter (%d) != stats_counter (%d)\n",
4013 i, tclient->stats_counter, bp->stats_counter);
4016 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4017 bp->stats_counter) {
4018 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4019 " ustorm counter (%d) != stats_counter (%d)\n",
4020 i, uclient->stats_counter, bp->stats_counter);
4024 qstats->total_bytes_received_hi =
4025 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4026 qstats->total_bytes_received_lo =
4027 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4029 ADD_64(qstats->total_bytes_received_hi,
4030 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4031 qstats->total_bytes_received_lo,
4032 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4034 ADD_64(qstats->total_bytes_received_hi,
4035 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4036 qstats->total_bytes_received_lo,
4037 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4039 qstats->valid_bytes_received_hi =
4040 qstats->total_bytes_received_hi;
4041 qstats->valid_bytes_received_lo =
4042 qstats->total_bytes_received_lo;
4044 qstats->error_bytes_received_hi =
4045 le32_to_cpu(tclient->rcv_error_bytes.hi);
4046 qstats->error_bytes_received_lo =
4047 le32_to_cpu(tclient->rcv_error_bytes.lo);
4049 ADD_64(qstats->total_bytes_received_hi,
4050 qstats->error_bytes_received_hi,
4051 qstats->total_bytes_received_lo,
4052 qstats->error_bytes_received_lo);
4054 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4055 total_unicast_packets_received);
4056 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4057 total_multicast_packets_received);
4058 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4059 total_broadcast_packets_received);
4060 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4061 etherstatsoverrsizepkts);
4062 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4064 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4065 total_unicast_packets_received);
4066 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4067 total_multicast_packets_received);
4068 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4069 total_broadcast_packets_received);
4070 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4071 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4072 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4074 qstats->total_bytes_transmitted_hi =
4075 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4076 qstats->total_bytes_transmitted_lo =
4077 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4079 ADD_64(qstats->total_bytes_transmitted_hi,
4080 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4081 qstats->total_bytes_transmitted_lo,
4082 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4084 ADD_64(qstats->total_bytes_transmitted_hi,
4085 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4086 qstats->total_bytes_transmitted_lo,
4087 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4089 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4090 total_unicast_packets_transmitted);
4091 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4092 total_multicast_packets_transmitted);
4093 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4094 total_broadcast_packets_transmitted);
4096 old_tclient->checksum_discard = tclient->checksum_discard;
4097 old_tclient->ttl0_discard = tclient->ttl0_discard;
4099 ADD_64(fstats->total_bytes_received_hi,
4100 qstats->total_bytes_received_hi,
4101 fstats->total_bytes_received_lo,
4102 qstats->total_bytes_received_lo);
4103 ADD_64(fstats->total_bytes_transmitted_hi,
4104 qstats->total_bytes_transmitted_hi,
4105 fstats->total_bytes_transmitted_lo,
4106 qstats->total_bytes_transmitted_lo);
4107 ADD_64(fstats->total_unicast_packets_received_hi,
4108 qstats->total_unicast_packets_received_hi,
4109 fstats->total_unicast_packets_received_lo,
4110 qstats->total_unicast_packets_received_lo);
4111 ADD_64(fstats->total_multicast_packets_received_hi,
4112 qstats->total_multicast_packets_received_hi,
4113 fstats->total_multicast_packets_received_lo,
4114 qstats->total_multicast_packets_received_lo);
4115 ADD_64(fstats->total_broadcast_packets_received_hi,
4116 qstats->total_broadcast_packets_received_hi,
4117 fstats->total_broadcast_packets_received_lo,
4118 qstats->total_broadcast_packets_received_lo);
4119 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4120 qstats->total_unicast_packets_transmitted_hi,
4121 fstats->total_unicast_packets_transmitted_lo,
4122 qstats->total_unicast_packets_transmitted_lo);
4123 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4124 qstats->total_multicast_packets_transmitted_hi,
4125 fstats->total_multicast_packets_transmitted_lo,
4126 qstats->total_multicast_packets_transmitted_lo);
4127 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4128 qstats->total_broadcast_packets_transmitted_hi,
4129 fstats->total_broadcast_packets_transmitted_lo,
4130 qstats->total_broadcast_packets_transmitted_lo);
4131 ADD_64(fstats->valid_bytes_received_hi,
4132 qstats->valid_bytes_received_hi,
4133 fstats->valid_bytes_received_lo,
4134 qstats->valid_bytes_received_lo);
4136 ADD_64(estats->error_bytes_received_hi,
4137 qstats->error_bytes_received_hi,
4138 estats->error_bytes_received_lo,
4139 qstats->error_bytes_received_lo);
4140 ADD_64(estats->etherstatsoverrsizepkts_hi,
4141 qstats->etherstatsoverrsizepkts_hi,
4142 estats->etherstatsoverrsizepkts_lo,
4143 qstats->etherstatsoverrsizepkts_lo);
4144 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4145 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4148 ADD_64(fstats->total_bytes_received_hi,
4149 estats->rx_stat_ifhcinbadoctets_hi,
4150 fstats->total_bytes_received_lo,
4151 estats->rx_stat_ifhcinbadoctets_lo);
4153 memcpy(estats, &(fstats->total_bytes_received_hi),
4154 sizeof(struct host_func_stats) - 2*sizeof(u32));
4156 ADD_64(estats->etherstatsoverrsizepkts_hi,
4157 estats->rx_stat_dot3statsframestoolong_hi,
4158 estats->etherstatsoverrsizepkts_lo,
4159 estats->rx_stat_dot3statsframestoolong_lo);
4160 ADD_64(estats->error_bytes_received_hi,
4161 estats->rx_stat_ifhcinbadoctets_hi,
4162 estats->error_bytes_received_lo,
4163 estats->rx_stat_ifhcinbadoctets_lo);
4166 estats->mac_filter_discard =
4167 le32_to_cpu(tport->mac_filter_discard);
4168 estats->xxoverflow_discard =
4169 le32_to_cpu(tport->xxoverflow_discard);
4170 estats->brb_truncate_discard =
4171 le32_to_cpu(tport->brb_truncate_discard);
4172 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4175 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4177 bp->stats_pending = 0;
4182 static void bnx2x_net_stats_update(struct bnx2x *bp)
4184 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4185 struct net_device_stats *nstats = &bp->dev->stats;
4188 nstats->rx_packets =
4189 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4190 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4191 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4193 nstats->tx_packets =
4194 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4195 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4196 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4198 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4200 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4202 nstats->rx_dropped = estats->mac_discard;
4203 for_each_rx_queue(bp, i)
4204 nstats->rx_dropped +=
4205 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4207 nstats->tx_dropped = 0;
4209 nstats->multicast =
4210 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4212 nstats->collisions =
4213 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4215 nstats->rx_length_errors =
4216 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4217 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4218 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4219 bnx2x_hilo(&estats->brb_truncate_hi);
4220 nstats->rx_crc_errors =
4221 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4222 nstats->rx_frame_errors =
4223 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4224 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4225 nstats->rx_missed_errors = estats->xxoverflow_discard;
4227 nstats->rx_errors = nstats->rx_length_errors +
4228 nstats->rx_over_errors +
4229 nstats->rx_crc_errors +
4230 nstats->rx_frame_errors +
4231 nstats->rx_fifo_errors +
4232 nstats->rx_missed_errors;
4234 nstats->tx_aborted_errors =
4235 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4236 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4237 nstats->tx_carrier_errors =
4238 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4239 nstats->tx_fifo_errors = 0;
4240 nstats->tx_heartbeat_errors = 0;
4241 nstats->tx_window_errors = 0;
4243 nstats->tx_errors = nstats->tx_aborted_errors +
4244 nstats->tx_carrier_errors +
4245 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4248 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4250 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4251 int i;
4253 estats->driver_xoff = 0;
4254 estats->rx_err_discard_pkt = 0;
4255 estats->rx_skb_alloc_failed = 0;
4256 estats->hw_csum_err = 0;
4257 for_each_rx_queue(bp, i) {
4258 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4260 estats->driver_xoff += qstats->driver_xoff;
4261 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4262 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4263 estats->hw_csum_err += qstats->hw_csum_err;
4264 }
4265 }
4267 static void bnx2x_stats_update(struct bnx2x *bp)
4269 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4271 if (*stats_comp != DMAE_COMP_VAL)
4272 return;
4274 if (bp->port.pmf)
4275 bnx2x_hw_stats_update(bp);
4277 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4278 BNX2X_ERR("storm stats were not updated for 3 times\n");
4279 bnx2x_panic();
4280 return;
4281 }
4283 bnx2x_net_stats_update(bp);
4284 bnx2x_drv_stats_update(bp);
4286 if (bp->msglevel & NETIF_MSG_TIMER) {
4287 struct bnx2x_fastpath *fp0_rx = bp->fp;
4288 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4289 struct tstorm_per_client_stats *old_tclient =
4290 &bp->fp->old_tclient;
4291 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4292 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4293 struct net_device_stats *nstats = &bp->dev->stats;
4294 int i;
4296 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4297 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4299 bnx2x_tx_avail(fp0_tx),
4300 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4301 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4303 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4304 fp0_rx->rx_comp_cons),
4305 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4306 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4307 "brb truncate %u\n",
4308 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4309 qstats->driver_xoff,
4310 estats->brb_drop_lo, estats->brb_truncate_lo);
4311 printk(KERN_DEBUG "tstats: checksum_discard %u "
4312 "packets_too_big_discard %lu no_buff_discard %lu "
4313 "mac_discard %u mac_filter_discard %u "
4314 "xxovrflow_discard %u brb_truncate_discard %u "
4315 "ttl0_discard %u\n",
4316 le32_to_cpu(old_tclient->checksum_discard),
4317 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4318 bnx2x_hilo(&qstats->no_buff_discard_hi),
4319 estats->mac_discard, estats->mac_filter_discard,
4320 estats->xxoverflow_discard, estats->brb_truncate_discard,
4321 le32_to_cpu(old_tclient->ttl0_discard));
4323 for_each_queue(bp, i) {
4324 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4325 bnx2x_fp(bp, i, tx_pkt),
4326 bnx2x_fp(bp, i, rx_pkt),
4327 bnx2x_fp(bp, i, rx_calls));
4328 }
4329 }
4331 bnx2x_hw_stats_post(bp);
4332 bnx2x_storm_stats_post(bp);
4333 }
4335 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4337 struct dmae_command *dmae;
4338 u32 opcode;
4339 int loader_idx = PMF_DMAE_C(bp);
4340 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4342 bp->executer_idx = 0;
4344 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4345 DMAE_CMD_C_ENABLE |
4346 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4347 #ifdef __BIG_ENDIAN
4348 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4349 #else
4350 DMAE_CMD_ENDIANITY_DW_SWAP |
4351 #endif
4352 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4353 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4355 if (bp->port.port_stx) {
4357 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4358 if (bp->func_stx)
4359 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4360 else
4361 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4362 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4363 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4364 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4365 dmae->dst_addr_hi = 0;
4366 dmae->len = sizeof(struct host_port_stats) >> 2;
4367 if (bp->func_stx) {
4368 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4369 dmae->comp_addr_hi = 0;
4370 dmae->comp_val = 1;
4371 } else {
4372 dmae->comp_addr_lo =
4373 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4374 dmae->comp_addr_hi =
4375 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4376 dmae->comp_val = DMAE_COMP_VAL;
4378 *stats_comp = 0;
4379 }
4380 }
4382 if (bp->func_stx) {
4384 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4385 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4386 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4387 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4388 dmae->dst_addr_lo = bp->func_stx >> 2;
4389 dmae->dst_addr_hi = 0;
4390 dmae->len = sizeof(struct host_func_stats) >> 2;
4391 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4392 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4393 dmae->comp_val = DMAE_COMP_VAL;
4395 *stats_comp = 0;
4396 }
4397 }
4399 static void bnx2x_stats_stop(struct bnx2x *bp)
4400 {
4401 int update = 0;
4403 bnx2x_stats_comp(bp);
4405 if (bp->port.pmf)
4406 update = (bnx2x_hw_stats_update(bp) == 0);
4408 update |= (bnx2x_storm_stats_update(bp) == 0);
4410 if (update) {
4411 bnx2x_net_stats_update(bp);
4413 if (bp->port.pmf)
4414 bnx2x_port_stats_stop(bp);
4416 bnx2x_hw_stats_post(bp);
4417 bnx2x_stats_comp(bp);
4418 }
4419 }
4421 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4425 static const struct {
4426 void (*action)(struct bnx2x *bp);
4427 enum bnx2x_stats_state next_state;
4428 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4429 /* state event */
4430 {
4431 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4432 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4433 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4434 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4435 },
4436 {
4437 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4438 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4439 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4440 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4441 }
4442 };
4444 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4446 enum bnx2x_stats_state state = bp->stats_state;
4448 bnx2x_stats_stm[state][event].action(bp);
4449 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4451 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4452 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4453 state, event, bp->stats_state);
4454 }
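/*
 * Editor's note: illustrative sketch only. bnx2x_stats_handle() above is
 * a classic table-driven state machine: bnx2x_stats_stm[state][event]
 * pairs an action callback with the next state. The self-contained toy
 * below (hypothetical demo_* names) shows the same dispatch pattern.
 */
#include <stdio.h>

enum demo_state { DEMO_DISABLED, DEMO_ENABLED, DEMO_STATE_MAX };
enum demo_event { DEMO_LINK_UP, DEMO_STOP, DEMO_EVENT_MAX };

static void demo_start(void) { printf("start collecting\n"); }
static void demo_stop(void)  { printf("stop collecting\n"); }
static void demo_nop(void)   { }

static const struct {
	void (*action)(void);
	enum demo_state next_state;
} demo_stm[DEMO_STATE_MAX][DEMO_EVENT_MAX] = {
	/* DISABLED */ { {demo_start, DEMO_ENABLED}, {demo_nop,  DEMO_DISABLED} },
	/* ENABLED  */ { {demo_nop,   DEMO_ENABLED}, {demo_stop, DEMO_DISABLED} },
};

static enum demo_state demo_handle(enum demo_state state, enum demo_event ev)
{
	demo_stm[state][ev].action();		/* act first ... */
	return demo_stm[state][ev].next_state;	/* ... then transition */
}

int main(void)
{
	enum demo_state s = DEMO_DISABLED;

	s = demo_handle(s, DEMO_LINK_UP);	/* DISABLED -> ENABLED */
	s = demo_handle(s, DEMO_STOP);		/* ENABLED -> DISABLED */
	return (int)s;
}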
4456 static void bnx2x_timer(unsigned long data)
4458 struct bnx2x *bp = (struct bnx2x *) data;
4460 if (!netif_running(bp->dev))
4461 return;
4463 if (atomic_read(&bp->intr_sem) != 0)
4464 goto timer_restart;
4466 if (poll) {
4467 struct bnx2x_fastpath *fp = &bp->fp[0];
4468 int rc;
4470 bnx2x_tx_int(fp);
4471 rc = bnx2x_rx_int(fp, 1000);
4472 }
4474 if (!BP_NOMCP(bp)) {
4475 int func = BP_FUNC(bp);
4476 u32 drv_pulse;
4477 u32 mcp_pulse;
4479 ++bp->fw_drv_pulse_wr_seq;
4480 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4481 /* TBD - add SYSTEM_TIME */
4482 drv_pulse = bp->fw_drv_pulse_wr_seq;
4483 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4485 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4486 MCP_PULSE_SEQ_MASK);
4487 /* The delta between driver pulse and mcp response
4488 * should be 1 (before mcp response) or 0 (after mcp response)
4489 */
4490 if ((drv_pulse != mcp_pulse) &&
4491 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4492 /* someone lost a heartbeat... */
4493 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4494 drv_pulse, mcp_pulse);
4495 }
4496 }
4498 if ((bp->state == BNX2X_STATE_OPEN) ||
4499 (bp->state == BNX2X_STATE_DISABLED))
4500 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4502 timer_restart:
4503 mod_timer(&bp->timer, jiffies + bp->current_interval);
4504 }
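/*
 * Editor's note: illustrative sketch. The timer above exchanges heartbeat
 * sequence numbers with the management CPU (MCP) modulo a mask; the MCP's
 * reply may lag the driver by at most one tick. Hypothetical demo of the
 * same check:
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PULSE_SEQ_MASK 0x7fff	/* assumed mask width for the demo */

/* healthy if the MCP answered (equal) or is exactly one tick behind */
static int demo_pulse_ok(uint16_t drv_pulse, uint16_t mcp_pulse)
{
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & DEMO_PULSE_SEQ_MASK));
}

int main(void)
{
	printf("%d %d %d\n",
	       demo_pulse_ok(5, 5),	/* 1: in sync */
	       demo_pulse_ok(5, 4),	/* 1: reply pending */
	       demo_pulse_ok(7, 4));	/* 0: heartbeat lost */
	return 0;
}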
4506 /* end of Statistics */
4510 /*
4511 * nic init service functions
4512 */
4514 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4516 int port = BP_PORT(bp);
4519 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4520 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4521 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4522 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4523 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4524 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4527 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4528 dma_addr_t mapping, int sb_id)
4530 int port = BP_PORT(bp);
4531 int func = BP_FUNC(bp);
4532 int index;
4533 u64 section;
4535 /* USTORM */
4536 section = ((u64)mapping) + offsetof(struct host_status_block,
4537 u_status_block);
4538 sb->u_status_block.status_block_id = sb_id;
4540 REG_WR(bp, BAR_CSTRORM_INTMEM +
4541 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4542 REG_WR(bp, BAR_CSTRORM_INTMEM +
4543 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4544 U64_HI(section));
4545 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4546 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4548 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4549 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4550 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4552 /* CSTORM */
4553 section = ((u64)mapping) + offsetof(struct host_status_block,
4554 c_status_block);
4555 sb->c_status_block.status_block_id = sb_id;
4557 REG_WR(bp, BAR_CSTRORM_INTMEM +
4558 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4559 REG_WR(bp, BAR_CSTRORM_INTMEM +
4560 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4561 U64_HI(section));
4562 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4563 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4565 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4566 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4567 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4569 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4570 }
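/*
 * Editor's note: illustrative sketch. The status-block setup above writes
 * one 64-bit DMA address as two 32-bit halves via U64_LO()/U64_HI().
 * Hypothetical demo macros showing the split:
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_U64_LO(x) ((uint32_t)((uint64_t)(x) & 0xffffffffULL))
#define DEMO_U64_HI(x) ((uint32_t)((uint64_t)(x) >> 32))

int main(void)
{
	uint64_t mapping = 0x0000001234abcd00ULL;	/* example DMA address */

	printf("lo=0x%08x hi=0x%08x\n",
	       DEMO_U64_LO(mapping), DEMO_U64_HI(mapping));
	return 0;
}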
4572 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4574 int func = BP_FUNC(bp);
4576 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4577 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4578 sizeof(struct tstorm_def_status_block)/4);
4579 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4580 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4581 sizeof(struct cstorm_def_status_block_u)/4);
4582 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4583 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4584 sizeof(struct cstorm_def_status_block_c)/4);
4585 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4586 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4587 sizeof(struct xstorm_def_status_block)/4);
4590 static void bnx2x_init_def_sb(struct bnx2x *bp,
4591 struct host_def_status_block *def_sb,
4592 dma_addr_t mapping, int sb_id)
4594 int port = BP_PORT(bp);
4595 int func = BP_FUNC(bp);
4596 int index, val, reg_offset;
4597 u64 section;
4600 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4601 atten_status_block);
4602 def_sb->atten_status_block.status_block_id = sb_id;
4604 bp->attn_state = 0;
4606 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4607 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4609 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4610 bp->attn_group[index].sig[0] = REG_RD(bp,
4611 reg_offset + 0x10*index);
4612 bp->attn_group[index].sig[1] = REG_RD(bp,
4613 reg_offset + 0x4 + 0x10*index);
4614 bp->attn_group[index].sig[2] = REG_RD(bp,
4615 reg_offset + 0x8 + 0x10*index);
4616 bp->attn_group[index].sig[3] = REG_RD(bp,
4617 reg_offset + 0xc + 0x10*index);
4620 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4621 HC_REG_ATTN_MSG0_ADDR_L);
4623 REG_WR(bp, reg_offset, U64_LO(section));
4624 REG_WR(bp, reg_offset + 4, U64_HI(section));
4626 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4628 val = REG_RD(bp, reg_offset);
4629 val |= sb_id;
4630 REG_WR(bp, reg_offset, val);
4633 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4634 u_def_status_block);
4635 def_sb->u_def_status_block.status_block_id = sb_id;
4637 REG_WR(bp, BAR_CSTRORM_INTMEM +
4638 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4639 REG_WR(bp, BAR_CSTRORM_INTMEM +
4640 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4641 U64_HI(section));
4642 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4643 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4645 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4646 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4647 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4650 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4651 c_def_status_block);
4652 def_sb->c_def_status_block.status_block_id = sb_id;
4654 REG_WR(bp, BAR_CSTRORM_INTMEM +
4655 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4656 REG_WR(bp, BAR_CSTRORM_INTMEM +
4657 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4658 U64_HI(section));
4659 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4660 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4662 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4663 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4664 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4667 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4668 t_def_status_block);
4669 def_sb->t_def_status_block.status_block_id = sb_id;
4671 REG_WR(bp, BAR_TSTRORM_INTMEM +
4672 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4673 REG_WR(bp, BAR_TSTRORM_INTMEM +
4674 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4675 U64_HI(section));
4676 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4677 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4679 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4680 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4681 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4684 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4685 x_def_status_block);
4686 def_sb->x_def_status_block.status_block_id = sb_id;
4688 REG_WR(bp, BAR_XSTRORM_INTMEM +
4689 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4690 REG_WR(bp, BAR_XSTRORM_INTMEM +
4691 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4692 U64_HI(section));
4693 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4694 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4696 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4697 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4698 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4700 bp->stats_pending = 0;
4701 bp->set_mac_pending = 0;
4703 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4706 static void bnx2x_update_coalesce(struct bnx2x *bp)
4708 int port = BP_PORT(bp);
4709 int i;
4711 for_each_queue(bp, i) {
4712 int sb_id = bp->fp[i].sb_id;
4714 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4715 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4716 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4717 U_SB_ETH_RX_CQ_INDEX),
4718 bp->rx_ticks/12);
4719 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4720 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4721 U_SB_ETH_RX_CQ_INDEX),
4722 (bp->rx_ticks/12) ? 0 : 1);
4724 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4725 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4726 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4727 C_SB_ETH_TX_CQ_INDEX),
4728 bp->tx_ticks/12);
4729 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4730 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4731 C_SB_ETH_TX_CQ_INDEX),
4732 (bp->tx_ticks/12) ? 0 : 1);
4733 }
4734 }
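/*
 * Editor's note: illustrative sketch, assuming the timeout register counts
 * in 12-usec units as the ticks/12 arithmetic above suggests. When the
 * requested interval is below one unit the index is disabled instead.
 */
#include <stdio.h>

static void demo_coalesce(unsigned int usec)
{
	unsigned int timeout = usec / 12;	/* value programmed to HC */
	int disable = (timeout == 0);		/* cannot wait < 12 usec */

	printf("usec=%3u -> timeout=%2u disable=%d\n", usec, timeout, disable);
}

int main(void)
{
	demo_coalesce(0);
	demo_coalesce(25);
	demo_coalesce(200);
	return 0;
}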
4736 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4737 struct bnx2x_fastpath *fp, int last)
4739 int i;
4741 for (i = 0; i < last; i++) {
4742 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4743 struct sk_buff *skb = rx_buf->skb;
4745 if (skb == NULL) {
4746 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4747 continue;
4748 }
4750 if (fp->tpa_state[i] == BNX2X_TPA_START)
4751 pci_unmap_single(bp->pdev,
4752 pci_unmap_addr(rx_buf, mapping),
4753 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4755 dev_kfree_skb(skb);
4756 rx_buf->skb = NULL;
4757 }
4758 }
4760 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4762 int func = BP_FUNC(bp);
4763 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4764 ETH_MAX_AGGREGATION_QUEUES_E1H;
4765 u16 ring_prod, cqe_ring_prod;
4766 int i, j;
4768 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4770 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4772 if (bp->flags & TPA_ENABLE_FLAG) {
4774 for_each_rx_queue(bp, j) {
4775 struct bnx2x_fastpath *fp = &bp->fp[j];
4777 for (i = 0; i < max_agg_queues; i++) {
4778 fp->tpa_pool[i].skb =
4779 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4780 if (!fp->tpa_pool[i].skb) {
4781 BNX2X_ERR("Failed to allocate TPA "
4782 "skb pool for queue[%d] - "
4783 "disabling TPA on this "
4785 bnx2x_free_tpa_pool(bp, fp, i);
4786 fp->disable_tpa = 1;
4787 break;
4788 }
4789 pci_unmap_addr_set((struct sw_rx_bd *)
4790 &bp->fp->tpa_pool[i],
4791 mapping, 0);
4792 fp->tpa_state[i] = BNX2X_TPA_STOP;
4797 for_each_rx_queue(bp, j) {
4798 struct bnx2x_fastpath *fp = &bp->fp[j];
4801 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4802 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4804 /* Mark queue as Rx */
4805 fp->is_rx_queue = 1;
4807 /* "next page" elements initialization */
4809 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4810 struct eth_rx_sge *sge;
4812 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4813 sge->addr_hi =
4814 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4815 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4816 sge->addr_lo =
4817 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4818 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4819 }
4821 bnx2x_init_sge_ring_bit_mask(fp);
4824 for (i = 1; i <= NUM_RX_RINGS; i++) {
4825 struct eth_rx_bd *rx_bd;
4827 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4828 rx_bd->addr_hi =
4829 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4830 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4831 rx_bd->addr_lo =
4832 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4833 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4834 }
4837 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4838 struct eth_rx_cqe_next_page *nextpg;
4840 nextpg = (struct eth_rx_cqe_next_page *)
4841 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4842 nextpg->addr_hi =
4843 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4844 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4845 nextpg->addr_lo =
4846 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4847 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4848 }
4850 /* Allocate SGEs and initialize the ring elements */
4851 for (i = 0, ring_prod = 0;
4852 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4854 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4855 BNX2X_ERR("was only able to allocate "
4857 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4858 /* Cleanup already allocated elements */
4859 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4860 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4861 fp->disable_tpa = 1;
4862 ring_prod = 0;
4863 break;
4864 }
4865 ring_prod = NEXT_SGE_IDX(ring_prod);
4867 fp->rx_sge_prod = ring_prod;
4869 /* Allocate BDs and initialize BD ring */
4870 fp->rx_comp_cons = 0;
4871 cqe_ring_prod = ring_prod = 0;
4872 for (i = 0; i < bp->rx_ring_size; i++) {
4873 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4874 BNX2X_ERR("was only able to allocate "
4875 "%d rx skbs on queue[%d]\n", i, j);
4876 fp->eth_q_stats.rx_skb_alloc_failed++;
4877 break;
4878 }
4879 ring_prod = NEXT_RX_IDX(ring_prod);
4880 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4881 WARN_ON(ring_prod <= i);
4884 fp->rx_bd_prod = ring_prod;
4885 /* must not have more available CQEs than BDs */
4886 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4887 cqe_ring_prod);
4888 fp->rx_pkt = fp->rx_calls = 0;
4890 /* Warning!
4891 * this will generate an interrupt (to the TSTORM)
4892 * must only be done after chip is initialized
4893 */
4894 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4895 fp->rx_sge_prod);
4897 if (j != 0)
4898 continue;
4899 REG_WR(bp, BAR_USTRORM_INTMEM +
4900 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4901 U64_LO(fp->rx_comp_mapping));
4902 REG_WR(bp, BAR_USTRORM_INTMEM +
4903 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4904 U64_HI(fp->rx_comp_mapping));
4905 }
4906 }
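/*
 * Editor's note: illustrative sketch. Each multi-page ring above reserves
 * its last descriptor slots per page as "next page" pointers, and page
 * i - 1 links to page (i % NUM_PAGES) so the last page wraps back to the
 * first. Hypothetical stand-alone computation of the link targets:
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u
#define DEMO_NUM_PAGES 4u

int main(void)
{
	uint64_t ring_base = 0x10000000ULL;	/* example DMA base address */
	unsigned int i;

	for (i = 1; i <= DEMO_NUM_PAGES; i++) {
		uint64_t next = ring_base +
				DEMO_PAGE_SIZE * (i % DEMO_NUM_PAGES);

		printf("page %u -> next page at 0x%llx\n",
		       i - 1, (unsigned long long)next);
	}
	return 0;
}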
4908 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4910 int i, j;
4912 for_each_tx_queue(bp, j) {
4913 struct bnx2x_fastpath *fp = &bp->fp[j];
4915 for (i = 1; i <= NUM_TX_RINGS; i++) {
4916 struct eth_tx_next_bd *tx_next_bd =
4917 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
4919 tx_next_bd->addr_hi =
4920 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4921 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4922 tx_next_bd->addr_lo =
4923 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4924 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4927 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
4928 fp->tx_db.data.zero_fill1 = 0;
4929 fp->tx_db.data.prod = 0;
4931 fp->tx_pkt_prod = 0;
4932 fp->tx_pkt_cons = 0;
4933 fp->tx_bd_prod = 0;
4934 fp->tx_bd_cons = 0;
4935 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4936 fp->tx_pkt = 0;
4937 }
4938 }
4940 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4942 int func = BP_FUNC(bp);
4944 spin_lock_init(&bp->spq_lock);
4946 bp->spq_left = MAX_SPQ_PENDING;
4947 bp->spq_prod_idx = 0;
4948 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4949 bp->spq_prod_bd = bp->spq;
4950 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4952 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4953 U64_LO(bp->spq_mapping));
4954 REG_WR(bp,
4955 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4956 U64_HI(bp->spq_mapping));
4958 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4959 bp->spq_prod_idx);
4960 }
4962 static void bnx2x_init_context(struct bnx2x *bp)
4964 int i;
4966 for_each_rx_queue(bp, i) {
4967 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4968 struct bnx2x_fastpath *fp = &bp->fp[i];
4969 u8 cl_id = fp->cl_id;
4971 context->ustorm_st_context.common.sb_index_numbers =
4972 BNX2X_RX_SB_INDEX_NUM;
4973 context->ustorm_st_context.common.clientId = cl_id;
4974 context->ustorm_st_context.common.status_block_id = fp->sb_id;
4975 context->ustorm_st_context.common.flags =
4976 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4977 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4978 context->ustorm_st_context.common.statistics_counter_id =
4979 cl_id;
4980 context->ustorm_st_context.common.mc_alignment_log_size =
4981 BNX2X_RX_ALIGN_SHIFT;
4982 context->ustorm_st_context.common.bd_buff_size =
4983 bp->rx_buf_size;
4984 context->ustorm_st_context.common.bd_page_base_hi =
4985 U64_HI(fp->rx_desc_mapping);
4986 context->ustorm_st_context.common.bd_page_base_lo =
4987 U64_LO(fp->rx_desc_mapping);
4988 if (!fp->disable_tpa) {
4989 context->ustorm_st_context.common.flags |=
4990 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
4991 context->ustorm_st_context.common.sge_buff_size =
4992 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4993 (u32)0xffff);
4994 context->ustorm_st_context.common.sge_page_base_hi =
4995 U64_HI(fp->rx_sge_mapping);
4996 context->ustorm_st_context.common.sge_page_base_lo =
4997 U64_LO(fp->rx_sge_mapping);
4999 context->ustorm_st_context.common.max_sges_for_packet =
5000 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5001 context->ustorm_st_context.common.max_sges_for_packet =
5002 ((context->ustorm_st_context.common.
5003 max_sges_for_packet + PAGES_PER_SGE - 1) &
5004 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5005 }
5007 context->ustorm_ag_context.cdu_usage =
5008 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5009 CDU_REGION_NUMBER_UCM_AG,
5010 ETH_CONNECTION_TYPE);
5012 context->xstorm_ag_context.cdu_reserved =
5013 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5014 CDU_REGION_NUMBER_XCM_AG,
5015 ETH_CONNECTION_TYPE);
5018 for_each_tx_queue(bp, i) {
5019 struct bnx2x_fastpath *fp = &bp->fp[i];
5020 struct eth_context *context =
5021 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5023 context->cstorm_st_context.sb_index_number =
5024 C_SB_ETH_TX_CQ_INDEX;
5025 context->cstorm_st_context.status_block_id = fp->sb_id;
5027 context->xstorm_st_context.tx_bd_page_base_hi =
5028 U64_HI(fp->tx_desc_mapping);
5029 context->xstorm_st_context.tx_bd_page_base_lo =
5030 U64_LO(fp->tx_desc_mapping);
5031 context->xstorm_st_context.statistics_data = (fp->cl_id |
5032 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5033 }
5034 }
5036 static void bnx2x_init_ind_table(struct bnx2x *bp)
5038 int func = BP_FUNC(bp);
5039 int i;
5041 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5042 return;
5045 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5046 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5047 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5048 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5049 bp->fp->cl_id + (i % bp->num_rx_queues));
5050 }
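/*
 * Editor's note: illustrative sketch. The indirection table above maps
 * each RSS hash bucket to a client id round-robin over the Rx queues
 * (cl_id + i % num_rx_queues). Hypothetical demo with made-up sizes:
 */
#include <stdio.h>

int main(void)
{
	const int base_cl_id = 16;	/* assumed leading client id */
	const int num_rx_queues = 4;
	int i;

	/* a real table has TSTORM_INDIRECTION_TABLE_SIZE entries */
	for (i = 0; i < 8; i++)
		printf("bucket %d -> client %d\n",
		       i, base_cl_id + (i % num_rx_queues));
	return 0;
}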
5052 static void bnx2x_set_client_config(struct bnx2x *bp)
5054 struct tstorm_eth_client_config tstorm_client = {0};
5055 int port = BP_PORT(bp);
5056 int i;
5058 tstorm_client.mtu = bp->dev->mtu;
5059 tstorm_client.config_flags =
5060 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5061 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5062 #ifdef BCM_VLAN
5063 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5064 tstorm_client.config_flags |=
5065 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5066 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5070 for_each_queue(bp, i) {
5071 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5073 REG_WR(bp, BAR_TSTRORM_INTMEM +
5074 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5075 ((u32 *)&tstorm_client)[0]);
5076 REG_WR(bp, BAR_TSTRORM_INTMEM +
5077 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5078 ((u32 *)&tstorm_client)[1]);
5081 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5082 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5085 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5087 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5088 int mode = bp->rx_mode;
5089 int mask = (1 << BP_L_ID(bp));
5090 int func = BP_FUNC(bp);
5091 int port = BP_PORT(bp);
5092 int i;
5093 /* All but management unicast packets should pass to the host as well */
5094 u32 llh_mask =
5095 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5096 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5097 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5098 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5100 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5102 switch (mode) {
5103 case BNX2X_RX_MODE_NONE: /* no Rx */
5104 tstorm_mac_filter.ucast_drop_all = mask;
5105 tstorm_mac_filter.mcast_drop_all = mask;
5106 tstorm_mac_filter.bcast_drop_all = mask;
5107 break;
5109 case BNX2X_RX_MODE_NORMAL:
5110 tstorm_mac_filter.bcast_accept_all = mask;
5111 break;
5113 case BNX2X_RX_MODE_ALLMULTI:
5114 tstorm_mac_filter.mcast_accept_all = mask;
5115 tstorm_mac_filter.bcast_accept_all = mask;
5116 break;
5118 case BNX2X_RX_MODE_PROMISC:
5119 tstorm_mac_filter.ucast_accept_all = mask;
5120 tstorm_mac_filter.mcast_accept_all = mask;
5121 tstorm_mac_filter.bcast_accept_all = mask;
5122 /* pass management unicast packets as well */
5123 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5124 break;
5127 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5131 REG_WR(bp,
5132 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5133 llh_mask);
5135 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5136 REG_WR(bp, BAR_TSTRORM_INTMEM +
5137 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5138 ((u32 *)&tstorm_mac_filter)[i]);
5140 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5141 ((u32 *)&tstorm_mac_filter)[i]); */
5142 }
5144 if (mode != BNX2X_RX_MODE_NONE)
5145 bnx2x_set_client_config(bp);
5146 }
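/*
 * Editor's note: illustrative sketch. The switch above expands an rx mode
 * into per-client accept/drop masks, one bit per client. Hypothetical
 * demo of the same mapping:
 */
#include <stdio.h>

enum demo_rx_mode { DEMO_NONE, DEMO_NORMAL, DEMO_ALLMULTI, DEMO_PROMISC };

struct demo_filter {
	unsigned int ucast_accept_all;
	unsigned int mcast_accept_all;
	unsigned int bcast_accept_all;
};

static struct demo_filter demo_rx_filter(enum demo_rx_mode mode,
					 unsigned int mask)
{
	struct demo_filter f = { 0, 0, 0 };

	switch (mode) {
	case DEMO_NORMAL:
		f.bcast_accept_all = mask;
		break;
	case DEMO_ALLMULTI:
		f.mcast_accept_all = mask;
		f.bcast_accept_all = mask;
		break;
	case DEMO_PROMISC:
		f.ucast_accept_all = mask;
		f.mcast_accept_all = mask;
		f.bcast_accept_all = mask;
		break;
	default:		/* DEMO_NONE: leave everything dropped */
		break;
	}
	return f;
}

int main(void)
{
	struct demo_filter f = demo_rx_filter(DEMO_PROMISC, 1u << 2);

	printf("ucast=%#x mcast=%#x bcast=%#x\n",
	       f.ucast_accept_all, f.mcast_accept_all, f.bcast_accept_all);
	return 0;
}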
5148 static void bnx2x_init_internal_common(struct bnx2x *bp)
5150 int i;
5152 /* Zero this manually as its initialization is
5153 currently missing in the initTool */
5154 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5155 REG_WR(bp, BAR_USTRORM_INTMEM +
5156 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5159 static void bnx2x_init_internal_port(struct bnx2x *bp)
5161 int port = BP_PORT(bp);
5163 REG_WR(bp,
5164 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5165 REG_WR(bp,
5166 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5167 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5168 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5171 static void bnx2x_init_internal_func(struct bnx2x *bp)
5173 struct tstorm_eth_function_common_config tstorm_config = {0};
5174 struct stats_indication_flags stats_flags = {0};
5175 int port = BP_PORT(bp);
5176 int func = BP_FUNC(bp);
5177 int i, j;
5178 u32 offset;
5179 u16 max_agg_size;
5181 if (is_multi(bp)) {
5182 tstorm_config.config_flags = MULTI_FLAGS(bp);
5183 tstorm_config.rss_result_mask = MULTI_MASK;
5184 }
5186 /* Enable TPA if needed */
5187 if (bp->flags & TPA_ENABLE_FLAG)
5188 tstorm_config.config_flags |=
5189 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5191 if (IS_E1HMF(bp))
5192 tstorm_config.config_flags |=
5193 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5195 tstorm_config.leading_client_id = BP_L_ID(bp);
5197 REG_WR(bp, BAR_TSTRORM_INTMEM +
5198 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5199 (*(u32 *)&tstorm_config));
5201 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5202 bnx2x_set_storm_rx_mode(bp);
5204 for_each_queue(bp, i) {
5205 u8 cl_id = bp->fp[i].cl_id;
5207 /* reset xstorm per client statistics */
5208 offset = BAR_XSTRORM_INTMEM +
5209 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5210 for (j = 0;
5211 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5212 REG_WR(bp, offset + j*4, 0);
5214 /* reset tstorm per client statistics */
5215 offset = BAR_TSTRORM_INTMEM +
5216 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5217 for (j = 0;
5218 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5219 REG_WR(bp, offset + j*4, 0);
5221 /* reset ustorm per client statistics */
5222 offset = BAR_USTRORM_INTMEM +
5223 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5224 for (j = 0;
5225 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5226 REG_WR(bp, offset + j*4, 0);
5227 }
5229 /* Init statistics related context */
5230 stats_flags.collect_eth = 1;
5232 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5233 ((u32 *)&stats_flags)[0]);
5234 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5235 ((u32 *)&stats_flags)[1]);
5237 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5238 ((u32 *)&stats_flags)[0]);
5239 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5240 ((u32 *)&stats_flags)[1]);
5242 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5243 ((u32 *)&stats_flags)[0]);
5244 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5245 ((u32 *)&stats_flags)[1]);
5247 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5248 ((u32 *)&stats_flags)[0]);
5249 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5250 ((u32 *)&stats_flags)[1]);
5252 REG_WR(bp, BAR_XSTRORM_INTMEM +
5253 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5254 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5255 REG_WR(bp, BAR_XSTRORM_INTMEM +
5256 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5257 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5259 REG_WR(bp, BAR_TSTRORM_INTMEM +
5260 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5261 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5262 REG_WR(bp, BAR_TSTRORM_INTMEM +
5263 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5264 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5266 REG_WR(bp, BAR_USTRORM_INTMEM +
5267 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5268 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5269 REG_WR(bp, BAR_USTRORM_INTMEM +
5270 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5271 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5273 if (CHIP_IS_E1H(bp)) {
5274 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5275 IS_E1HMF(bp));
5276 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5277 IS_E1HMF(bp));
5278 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5279 IS_E1HMF(bp));
5280 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5281 IS_E1HMF(bp));
5283 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5284 bp->e1hov);
5285 }
5287 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5288 max_agg_size =
5289 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5290 SGE_PAGE_SIZE * PAGES_PER_SGE),
5291 (u32)0xffff);
5292 for_each_rx_queue(bp, i) {
5293 struct bnx2x_fastpath *fp = &bp->fp[i];
5295 REG_WR(bp, BAR_USTRORM_INTMEM +
5296 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5297 U64_LO(fp->rx_comp_mapping));
5298 REG_WR(bp, BAR_USTRORM_INTMEM +
5299 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5300 U64_HI(fp->rx_comp_mapping));
5303 REG_WR(bp, BAR_USTRORM_INTMEM +
5304 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5305 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5306 REG_WR(bp, BAR_USTRORM_INTMEM +
5307 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5308 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5310 REG_WR16(bp, BAR_USTRORM_INTMEM +
5311 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5312 max_agg_size);
5313 }
5315 /* dropless flow control */
5316 if (CHIP_IS_E1H(bp)) {
5317 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5319 rx_pause.bd_thr_low = 250;
5320 rx_pause.cqe_thr_low = 250;
5322 rx_pause.sge_thr_low = 0;
5323 rx_pause.bd_thr_high = 350;
5324 rx_pause.cqe_thr_high = 350;
5325 rx_pause.sge_thr_high = 0;
5327 for_each_rx_queue(bp, i) {
5328 struct bnx2x_fastpath *fp = &bp->fp[i];
5330 if (!fp->disable_tpa) {
5331 rx_pause.sge_thr_low = 150;
5332 rx_pause.sge_thr_high = 250;
5333 }
5336 offset = BAR_USTRORM_INTMEM +
5337 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5338 fp->cl_id);
5339 for (j = 0;
5340 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5341 j++)
5342 REG_WR(bp, offset + j*4,
5343 ((u32 *)&rx_pause)[j]);
5344 }
5345 }
5347 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5349 /* Init rate shaping and fairness contexts */
5350 if (IS_E1HMF(bp)) {
5351 int vn;
5353 /* During init there is no active link
5354 Until link is up, set link rate to 10Gbps */
5355 bp->link_vars.line_speed = SPEED_10000;
5356 bnx2x_init_port_minmax(bp);
5358 bnx2x_calc_vn_weight_sum(bp);
5360 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5361 bnx2x_init_vn_minmax(bp, 2*vn + port);
5363 /* Enable rate shaping and fairness */
5364 bp->cmng.flags.cmng_enables =
5365 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5366 if (bp->vn_weight_sum)
5367 bp->cmng.flags.cmng_enables |=
5368 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5370 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5371 " fairness will be disabled\n");
5373 /* rate shaping and fairness are disabled */
5375 "single function mode minmax will be disabled\n");
5379 /* Store it to internal memory */
5380 if (bp->port.pmf)
5381 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5382 REG_WR(bp, BAR_XSTRORM_INTMEM +
5383 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5384 ((u32 *)(&bp->cmng))[i]);
5385 }
5387 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5389 switch (load_code) {
5390 case FW_MSG_CODE_DRV_LOAD_COMMON:
5391 bnx2x_init_internal_common(bp);
5392 /* no break */
5394 case FW_MSG_CODE_DRV_LOAD_PORT:
5395 bnx2x_init_internal_port(bp);
5396 /* no break */
5398 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5399 bnx2x_init_internal_func(bp);
5400 break;
5402 default:
5403 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5404 break;
5405 }
5406 }
5408 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5410 int i;
5412 for_each_queue(bp, i) {
5413 struct bnx2x_fastpath *fp = &bp->fp[i];
5415 fp->bp = bp;
5416 fp->state = BNX2X_FP_STATE_CLOSED;
5417 fp->index = i;
5418 fp->cl_id = BP_L_ID(bp) + i;
5419 fp->sb_id = fp->cl_id;
5420 /* Suitable Rx and Tx SBs are served by the same client */
5421 if (i >= bp->num_rx_queues)
5422 fp->cl_id -= bp->num_rx_queues;
5424 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5425 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5426 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5427 fp->sb_id);
5428 bnx2x_update_fpsb_idx(fp);
5429 }
5431 /* ensure status block indices were read */
5432 rmb();
5435 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5436 DEF_SB_ID);
5437 bnx2x_update_dsb_idx(bp);
5438 bnx2x_update_coalesce(bp);
5439 bnx2x_init_rx_rings(bp);
5440 bnx2x_init_tx_ring(bp);
5441 bnx2x_init_sp_ring(bp);
5442 bnx2x_init_context(bp);
5443 bnx2x_init_internal(bp, load_code);
5444 bnx2x_init_ind_table(bp);
5445 bnx2x_stats_init(bp);
5447 /* At this point, we are ready for interrupts */
5448 atomic_set(&bp->intr_sem, 0);
5450 /* flush all before enabling interrupts */
5451 mb();
5452 mmiowb();
5454 bnx2x_int_enable(bp);
5456 /* Check for SPIO5 */
5457 bnx2x_attn_int_deasserted0(bp,
5458 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5459 AEU_INPUTS_ATTN_BITS_SPIO5);
5460 }
5462 /* end of nic init */
5464 /*
5465 * gzip service functions
5466 */
5468 static int bnx2x_gunzip_init(struct bnx2x *bp)
5470 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5471 &bp->gunzip_mapping);
5472 if (bp->gunzip_buf == NULL)
5473 goto gunzip_nomem1;
5475 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5476 if (bp->strm == NULL)
5477 goto gunzip_nomem2;
5479 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5480 GFP_KERNEL);
5481 if (bp->strm->workspace == NULL)
5482 goto gunzip_nomem3;
5484 return 0;
5486 gunzip_nomem3:
5487 kfree(bp->strm);
5488 bp->strm = NULL;
5490 gunzip_nomem2:
5491 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5492 bp->gunzip_mapping);
5493 bp->gunzip_buf = NULL;
5496 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5497 " un-compression\n", bp->dev->name);
5501 static void bnx2x_gunzip_end(struct bnx2x *bp)
5503 kfree(bp->strm->workspace);
5504 kfree(bp->strm);
5505 bp->strm = NULL;
5508 if (bp->gunzip_buf) {
5509 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5510 bp->gunzip_mapping);
5511 bp->gunzip_buf = NULL;
5515 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5516 {
5517 int n, rc;
5519 /* check gzip header */
5520 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5521 BNX2X_ERR("Bad gzip header\n");
5522 return -EINVAL;
5523 }
5525 n = 10;
5527 #define FNAME 0x8
5529 if (zbuf[3] & FNAME)
5530 while ((zbuf[n++] != 0) && (n < len));
5532 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5533 bp->strm->avail_in = len - n;
5534 bp->strm->next_out = bp->gunzip_buf;
5535 bp->strm->avail_out = FW_BUF_SIZE;
5537 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5538 if (rc != Z_OK)
5539 return rc;
5541 rc = zlib_inflate(bp->strm, Z_FINISH);
5542 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5543 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5544 bp->dev->name, bp->strm->msg);
5546 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5547 if (bp->gunzip_outlen & 0x3)
5548 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5549 " gunzip_outlen (%d) not aligned\n",
5550 bp->dev->name, bp->gunzip_outlen);
5551 bp->gunzip_outlen >>= 2;
5553 zlib_inflateEnd(bp->strm);
5555 if (rc == Z_STREAM_END)
5556 return 0;
5557 else
5558 return rc;
5559 }
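/*
 * Editor's note: illustrative sketch. bnx2x_gunzip() above skips the
 * 10-byte gzip header (magic 0x1f 0x8b, method 8 = deflate) plus an
 * optional NUL-terminated file name when the FNAME flag is set, then
 * hands the raw deflate stream to zlib. Hypothetical header walk:
 */
#include <stdio.h>

#define DEMO_FNAME 0x08		/* gzip FLG.FNAME bit */

static int demo_gzip_payload_offset(const unsigned char *z, int len)
{
	int n = 10;				/* fixed gzip header size */

	if (len < 10 || z[0] != 0x1f || z[1] != 0x8b || z[2] != 8)
		return -1;			/* bad gzip header */
	if (z[3] & DEMO_FNAME)			/* skip original file name */
		while (n < len && z[n++] != 0)
			;
	return n;				/* deflate data starts here */
}

int main(void)
{
	unsigned char hdr[] = { 0x1f, 0x8b, 8, DEMO_FNAME,
				0, 0, 0, 0, 0, 3,
				'f', 'w', 0, 0x63 /* deflate data ... */ };

	printf("payload at offset %d\n",
	       demo_gzip_payload_offset(hdr, (int)sizeof(hdr)));
	return 0;
}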
5561 /* nic load/unload */
5563 /*
5564 * General service functions
5565 */
5567 /* send a NIG loopback debug packet */
5568 static void bnx2x_lb_pckt(struct bnx2x *bp)
5569 {
5570 u32 wb_write[3];
5572 /* Ethernet source and destination addresses */
5573 wb_write[0] = 0x55555555;
5574 wb_write[1] = 0x55555555;
5575 wb_write[2] = 0x20; /* SOP */
5576 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5578 /* NON-IP protocol */
5579 wb_write[0] = 0x09000000;
5580 wb_write[1] = 0x55555555;
5581 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5582 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5585 /* some of the internal memories
5586 * are not directly readable from the driver
5587 * to test them we send debug packets
5588 */
5589 static int bnx2x_int_mem_test(struct bnx2x *bp)
5590 {
5591 int factor;
5592 int count, i;
5593 u32 val = 0;
5595 if (CHIP_REV_IS_FPGA(bp))
5596 factor = 120;
5597 else if (CHIP_REV_IS_EMUL(bp))
5598 factor = 200;
5599 else
5600 factor = 1;
5602 DP(NETIF_MSG_HW, "start part1\n");
5604 /* Disable inputs of parser neighbor blocks */
5605 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5606 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5607 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5608 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5610 /* Write 0 to parser credits for CFC search request */
5611 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5613 /* send Ethernet packet */
5614 bnx2x_lb_pckt(bp);
5616 /* TODO: do we need to reset the NIG statistics here? */
5617 /* Wait until NIG register shows 1 packet of size 0x10 */
5618 count = 1000 * factor;
5619 while (count) {
5621 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5622 val = *bnx2x_sp(bp, wb_data[0]);
5623 if (val == 0x10)
5624 break;
5626 msleep(10);
5627 count--;
5628 }
5629 if (val != 0x10) {
5630 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5631 return -1;
5632 }
5634 /* Wait until PRS register shows 1 packet */
5635 count = 1000 * factor;
5636 while (count) {
5637 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5638 if (val == 1)
5639 break;
5641 msleep(10);
5642 count--;
5643 }
5644 if (val != 0x1) {
5645 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5646 return -2;
5647 }
5649 /* Reset and init BRB, PRS */
5650 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5651 msleep(50);
5652 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5653 msleep(50);
5654 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5655 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5657 DP(NETIF_MSG_HW, "part2\n");
5659 /* Disable inputs of parser neighbor blocks */
5660 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5661 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5662 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5663 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5665 /* Write 0 to parser credits for CFC search request */
5666 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5668 /* send 10 Ethernet packets */
5669 for (i = 0; i < 10; i++)
5672 /* Wait until NIG register shows 10 + 1
5673 packets of size 11*0x10 = 0xb0 */
5674 count = 1000 * factor;
5675 while (count) {
5677 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5678 val = *bnx2x_sp(bp, wb_data[0]);
5679 if (val == 0xb0)
5680 break;
5682 msleep(10);
5683 count--;
5684 }
5685 if (val != 0xb0) {
5686 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5687 return -3;
5688 }
5690 /* Wait until PRS register shows 2 packets */
5691 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5692 if (val != 2)
5693 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5695 /* Write 1 to parser credits for CFC search request */
5696 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5698 /* Wait until PRS register shows 3 packets */
5699 msleep(10 * factor);
5700 /* Wait until NIG register shows 1 packet of size 0x10 */
5701 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5702 if (val != 3)
5703 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5705 /* clear NIG EOP FIFO */
5706 for (i = 0; i < 11; i++)
5707 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5708 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5709 if (val != 1) {
5710 BNX2X_ERR("clear of NIG failed\n");
5711 return -4;
5712 }
5714 /* Reset and init BRB, PRS, NIG */
5715 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5716 msleep(50);
5717 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5718 msleep(50);
5719 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5720 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5722 /* set NIC mode */
5723 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5726 /* Enable inputs of parser neighbor blocks */
5727 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5728 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5729 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5730 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5732 DP(NETIF_MSG_HW, "done\n");
5737 static void enable_blocks_attention(struct bnx2x *bp)
5739 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5740 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5741 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5742 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5743 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5744 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5745 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5746 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5747 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5748 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5749 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5750 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5751 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5752 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5753 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5754 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5755 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5756 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5757 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5758 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5759 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5760 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5761 if (CHIP_REV_IS_FPGA(bp))
5762 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5764 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5765 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5766 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5767 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5768 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5769 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5770 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5771 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5772 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5773 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5777 static void bnx2x_reset_common(struct bnx2x *bp)
5780 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5781 0xd3ffff7f);
5782 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5786 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5788 int is_required;
5789 u32 val;
5790 int port;
5791 is_required = 0;
5792 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5793 SHARED_HW_CFG_FAN_FAILURE_MASK;
5795 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5796 is_required = 1;
5798 /*
5799 * The fan failure mechanism is usually related to the PHY type since
5800 * the power consumption of the board is affected by the PHY. Currently,
5801 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5802 */
5803 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5804 for (port = PORT_0; port < PORT_MAX; port++) {
5805 u32 phy_type =
5806 SHMEM_RD(bp, dev_info.port_hw_config[port].
5807 external_phy_config) &
5808 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5809 is_required |=
5810 ((phy_type ==
5811 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5812 (phy_type ==
5813 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5814 (phy_type ==
5815 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5816 }
5818 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5820 if (is_required == 0)
5821 return;
5823 /* Fan failure is indicated by SPIO 5 */
5824 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5825 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5827 /* set to active low mode */
5828 val = REG_RD(bp, MISC_REG_SPIO_INT);
5829 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5830 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5831 REG_WR(bp, MISC_REG_SPIO_INT, val);
5833 /* enable interrupt to signal the IGU */
5834 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5835 val |= (1 << MISC_REGISTERS_SPIO_5);
5836 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5837 }
5839 static int bnx2x_init_common(struct bnx2x *bp)
5843 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5845 bnx2x_reset_common(bp);
5846 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5847 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5849 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5850 if (CHIP_IS_E1H(bp))
5851 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5853 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5854 msleep(30);
5855 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5857 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5858 if (CHIP_IS_E1(bp)) {
5859 /* enable HW interrupt from PXP on USDM overflow
5860 bit 16 on INT_MASK_0 */
5861 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5864 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5867 #ifdef __BIG_ENDIAN
5868 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5869 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5870 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5871 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5872 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5873 /* make sure this value is 0 */
5874 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5876 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5877 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5878 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5879 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5880 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5881 #endif
5883 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5884 #ifdef BCM_ISCSI
5885 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5886 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5887 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5888 #endif
5890 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5891 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5893 /* let the HW do its magic ... */
5894 msleep(100);
5895 /* finish PXP init */
5896 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5897 if (val != 1) {
5898 BNX2X_ERR("PXP2 CFG failed\n");
5899 return -EBUSY;
5900 }
5901 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5902 if (val != 1) {
5903 BNX2X_ERR("PXP2 RD_INIT failed\n");
5904 return -EBUSY;
5905 }
5907 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5908 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5910 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5912 /* clean the DMAE memory */
5913 bp->dmae_ready = 1;
5914 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5916 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5917 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5918 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5919 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5921 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5922 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5923 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5924 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5926 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5927 /* soft reset pulse */
5928 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5929 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5931 #ifdef BCM_ISCSI
5932 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5933 #endif
5935 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5936 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5937 if (!CHIP_REV_IS_SLOW(bp)) {
5938 /* enable hw interrupt from doorbell Q */
5939 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5942 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5943 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5944 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5945 /* set NIC mode */
5946 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5947 if (CHIP_IS_E1H(bp))
5948 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5950 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5951 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5952 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5953 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5955 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5956 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5957 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5958 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5960 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5961 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5962 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5963 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5965 /* sync semi rtc */
5966 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5967 0x80000000);
5968 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5969 0x80000000);
5971 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5972 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5973 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5975 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5976 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5977 REG_WR(bp, i, 0xc0cac01a);
5978 /* TODO: replace with something meaningful */
5979 }
5980 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5981 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5983 if (sizeof(union cdu_context) != 1024)
5984 /* we currently assume that a context is 1024 bytes */
5985 printk(KERN_ALERT PFX "please adjust the size of"
5986 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5988 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5989 val = (4 << 24) + (0 << 12) + 1024;
5990 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5992 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5993 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5994 /* enable context validation interrupt from CFC */
5995 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5997 /* set the thresholds to prevent CFC/CDU race */
5998 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6000 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6001 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6003 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6004 /* Reset PCIE errors for debug */
6005 REG_WR(bp, 0x2814, 0xffffffff);
6006 REG_WR(bp, 0x3820, 0xffffffff);
6008 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6009 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6010 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6011 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6013 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6014 if (CHIP_IS_E1H(bp)) {
6015 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6016 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6019 if (CHIP_REV_IS_SLOW(bp))
6020 msleep(200);
6022 /* finish CFC init */
6023 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6025 BNX2X_ERR("CFC LL_INIT failed\n");
6028 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6030 BNX2X_ERR("CFC AC_INIT failed\n");
6033 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6035 BNX2X_ERR("CFC CAM_INIT failed\n");
6038 REG_WR(bp, CFC_REG_DEBUG0, 0);
6040 /* read NIG statistic
6041 to see if this is our first up since powerup */
6042 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6043 val = *bnx2x_sp(bp, wb_data[0]);
6045 /* do internal memory self test */
6046 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6047 BNX2X_ERR("internal mem self test failed\n");
6051 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6052 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6053 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6054 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6055 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6056 bp->port.need_hw_lock = 1;
6057 break;
6059 default:
6060 break;
6061 }
6063 bnx2x_setup_fan_failure_detection(bp);
6065 /* clear PXP2 attentions */
6066 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6068 enable_blocks_attention(bp);
6070 if (!BP_NOMCP(bp)) {
6071 bnx2x_acquire_phy_lock(bp);
6072 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6073 bnx2x_release_phy_lock(bp);
6075 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6080 static int bnx2x_init_port(struct bnx2x *bp)
6082 int port = BP_PORT(bp);
6083 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6084 u32 low, high;
6085 u32 val;
6087 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6089 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6091 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6092 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6094 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6095 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6096 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6101 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6102 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6103 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6104 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6109 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6110 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6111 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6112 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6117 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6118 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6119 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6120 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6122 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6125 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6126 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6128 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6130 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6132 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6133 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6134 /* no pause for emulation and FPGA */
6135 low = 0;
6136 high = 513;
6137 } else {
6138 if (IS_E1HMF(bp))
6139 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6140 else if (bp->dev->mtu > 4096) {
6141 if (bp->flags & ONE_PORT_FLAG)
6142 low = 160;
6143 else {
6144 val = (u32)bp->dev->mtu;
6145 /* (24*1024 + val*4)/256 */
6146 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6147 }
6148 } else
6149 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6150 high = low + 56; /* 14*1024/256 */
6151 }
6152 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6153 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6156 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6158 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6159 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6160 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6161 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6163 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6164 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6165 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6166 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6168 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6169 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6171 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6173 /* configure PBF to work without PAUSE mtu 9000 */
6174 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6176 /* update threshold */
6177 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6178 /* update init credit */
6179 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6181 /* probe changes */
6182 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6183 msleep(5);
6184 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6186 #ifdef BCM_ISCSI
6187 /* tell the searcher where the T2 table is */
6188 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6190 wb_write[0] = U64_LO(bp->t2_mapping);
6191 wb_write[1] = U64_HI(bp->t2_mapping);
6192 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6193 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6194 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6195 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6197 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6198 #endif
6199 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6200 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6202 if (CHIP_IS_E1(bp)) {
6203 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6204 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6206 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6208 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6209 /* init aeu_mask_attn_func_0/1:
6210 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6211 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6212 * bits 4-7 are used for "per vn group attention" */
6213 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6214 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6216 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6217 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6218 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6219 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6220 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6222 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6224 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* support pause requests from USDM, TSDM and BRB */
		REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);

		REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
		REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
		REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and a 1 (valid) bit is
   added at the 53rd bit;
   then, since this is a wide register(TM),
   we split it into two 32-bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0
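/* Worked example (illustrative comment, not part of the original source):
 * for a physical address p = 0x0000012345678000,
 *   ONCHIP_ADDR1(p) = (p >> 12) & 0xFFFFFFFF = 0x12345678  (bits 12..43)
 *   ONCHIP_ADDR2(p) = (1 << 20) | (p >> 44)  = 0x00100000  (valid | bits 44+)
 * and PXP_ONE_ILT(i) packs the same ILT index i into both the "first"
 * (bits 0..9) and "last" (bits 10..19) fields of the L2P register, i.e.
 * a one-line ILT range.
 */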
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_blocks[i], FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;
	int i;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	  (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
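	/* Illustrative note (not from the original source): the loop above
	 * threads the 16KB T2 block into a singly linked free list of
	 * 64-byte entries - the last 8 bytes of each entry hold the DMA
	 * address of the next entry - and the fixup points the final
	 * entry back at the base, so the searcher sees a circular list.
	 */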
	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (i < bp->num_rx_queues)
			sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
		else
			sprintf(fp->name, "%s-tx-%d",
				bp->dev->name, i - bp->num_rx_queues);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->dev->name, bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies; /* prevent tx timeout */
}
/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
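	/* Illustrative note (assumption, not from the original source):
	 * each CAM half-word holds two MAC bytes in big-endian order, so
	 * on a little-endian host swab16() on the raw byte pair yields,
	 * e.g. for MAC 00:11:22:33:44:55:
	 *   msb 0x0011, middle 0x2233, lsb 0x4455.
	 */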
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
				    int *num_tx_queues_out)
{
	int _num_rx_queues = 0, _num_tx_queues = 0;

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_rx_queues)
			_num_rx_queues = min_t(u32, num_rx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_rx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		if (num_tx_queues)
			_num_tx_queues = min_t(u32, num_tx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_tx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		/* There must be not more Tx queues than Rx queues */
		if (_num_tx_queues > _num_rx_queues) {
			BNX2X_ERR("number of tx queues (%d) > "
				  "number of rx queues (%d)"
				  "  defaulting to %d\n",
				  _num_tx_queues, _num_rx_queues,
				  _num_rx_queues);
			_num_tx_queues = _num_rx_queues;
		}
		break;

	default:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;
	}

	*num_rx_queues_out = _num_rx_queues;
	*num_tx_queues_out = _num_tx_queues;
}
static int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_rx_queues = 1;
		bp->num_tx_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	default:
		/* Set interrupt mode according to bp->multi_mode value */
		bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
					&bp->num_tx_queues);

		DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X (rx %d tx %d), "
					  "set number of queues to 1\n",
					  bp->num_rx_queues,
					  bp->num_tx_queues);
			bp->num_rx_queues = 1;
			bp->num_tx_queues = 1;
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;

	return rc;
}
/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_int_mode()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);

		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}
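	/* Illustrative note (not from the original source): without an MCP
	 * the driver emulates its arbitration with load_count[]. E.g. on a
	 * two-port device, the first function up sees load_count[0] == 1
	 * and runs the COMMON init; the first function on the other port
	 * sees load_count[1 + port] == 1 and runs only the PORT init; any
	 * later function on an already-initialized port does FUNCTION init.
	 */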
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
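		/* Worked example (illustrative comment, not from the
		 * original source): for MAC 00:11:22:33:44:55 the two
		 * MAC_MATCH words written above are 0x0011 and 0x22334455,
		 * i.e. the station address is stored big-endian across the
		 * register pair.
		 */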
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */
/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
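	/* Worked example (illustrative comment with hypothetical register
	 * values, not from the original source): a chip num of 0x164e,
	 * rev 0x0, metal 0x00 and bond id 0x0 compose as
	 *   id = (0x164e << 16) | (0x0 << 12) | (0x00 << 4) | 0x0
	 *      = 0x164e0000.
	 */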
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;

	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;
	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}
	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
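	/* Worked example (illustrative comment, not from the original
	 * source): mac_upper = 0x0011 and mac_lower = 0x22334455 unpack
	 * to the station address 00:11:22:33:44:55 - the same big-endian
	 * layout used by the EMAC MAC_MATCH registers on the WoL path.
	 */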
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  VN %d in single function mode,"
					  "  aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
8641 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8643 struct bnx2x *bp = netdev_priv(dev);
8649 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8650 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8651 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8652 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8653 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8654 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8655 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8657 if (cmd->autoneg == AUTONEG_ENABLE) {
8658 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8659 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8663 /* advertise the requested speed and duplex if supported */
8664 cmd->advertising &= bp->port.supported;
8666 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8667 bp->link_params.req_duplex = DUPLEX_FULL;
8668 bp->port.advertising |= (ADVERTISED_Autoneg |
8671 } else { /* forced speed */
8672 /* advertise the requested speed and duplex if supported */
8673 switch (cmd->speed) {
8675 if (cmd->duplex == DUPLEX_FULL) {
8676 if (!(bp->port.supported &
8677 SUPPORTED_10baseT_Full)) {
8679 "10M full not supported\n");
8683 advertising = (ADVERTISED_10baseT_Full |
8686 if (!(bp->port.supported &
8687 SUPPORTED_10baseT_Half)) {
8689 "10M half not supported\n");
8693 advertising = (ADVERTISED_10baseT_Half |
8699 if (cmd->duplex == DUPLEX_FULL) {
8700 if (!(bp->port.supported &
8701 SUPPORTED_100baseT_Full)) {
8703 "100M full not supported\n");
8707 advertising = (ADVERTISED_100baseT_Full |
8710 if (!(bp->port.supported &
8711 SUPPORTED_100baseT_Half)) {
8713 "100M half not supported\n");
8717 advertising = (ADVERTISED_100baseT_Half |
8723 if (cmd->duplex != DUPLEX_FULL) {
8724 DP(NETIF_MSG_LINK, "1G half not supported\n");
8728 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8729 DP(NETIF_MSG_LINK, "1G full not supported\n");
8733 advertising = (ADVERTISED_1000baseT_Full |
8738 if (cmd->duplex != DUPLEX_FULL) {
8740 "2.5G half not supported\n");
8744 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8746 "2.5G full not supported\n");
8750 advertising = (ADVERTISED_2500baseX_Full |
8755 if (cmd->duplex != DUPLEX_FULL) {
8756 DP(NETIF_MSG_LINK, "10G half not supported\n");
8760 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8761 DP(NETIF_MSG_LINK, "10G full not supported\n");
8765 advertising = (ADVERTISED_10000baseT_Full |
8770 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8774 bp->link_params.req_line_speed = cmd->speed;
8775 bp->link_params.req_duplex = cmd->duplex;
8776 bp->port.advertising = advertising;
8779 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8780 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8781 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8782 bp->port.advertising);
8784 if (netif_running(dev)) {
8785 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8792 #define PHY_FW_VER_LEN 10
8794 static void bnx2x_get_drvinfo(struct net_device *dev,
8795 struct ethtool_drvinfo *info)
8797 struct bnx2x *bp = netdev_priv(dev);
8798 u8 phy_fw_ver[PHY_FW_VER_LEN];
8800 strcpy(info->driver, DRV_MODULE_NAME);
8801 strcpy(info->version, DRV_MODULE_VERSION);
8803 phy_fw_ver[0] = '\0';
8805 bnx2x_acquire_phy_lock(bp);
8806 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8807 (bp->state != BNX2X_STATE_CLOSED),
8808 phy_fw_ver, PHY_FW_VER_LEN);
8809 bnx2x_release_phy_lock(bp);
8812 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8813 (bp->common.bc_ver & 0xff0000) >> 16,
8814 (bp->common.bc_ver & 0xff00) >> 8,
8815 (bp->common.bc_ver & 0xff),
8816 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8817 strcpy(info->bus_info, pci_name(bp->pdev));
8818 info->n_stats = BNX2X_NUM_STATS;
8819 info->testinfo_len = BNX2X_NUM_TESTS;
8820 info->eedump_len = bp->common.flash_size;
8821 info->regdump_len = 0;
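
/* Editor's sketch (standalone, not driver code): the "BC:%d.%d.%d"
 * string built above just unpacks one byte per field from bc_ver.
 * The value below matches BNX2X_BC_VER for illustration. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bc_ver = 0x040200;
	char fw_version[32];

	snprintf(fw_version, sizeof(fw_version), "BC:%d.%d.%d",
		 (int)((bc_ver & 0xff0000) >> 16),
		 (int)((bc_ver & 0xff00) >> 8),
		 (int)(bc_ver & 0xff));
	puts(fw_version);	/* "BC:4.2.0" */
	return 0;
}
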
8824 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8825 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8827 static int bnx2x_get_regs_len(struct net_device *dev)
8829 static u32 regdump_len;
8830 struct bnx2x *bp = netdev_priv(dev);
8836 if (CHIP_IS_E1(bp)) {
8837 for (i = 0; i < REGS_COUNT; i++)
8838 if (IS_E1_ONLINE(reg_addrs[i].info))
8839 regdump_len += reg_addrs[i].size;
8841 for (i = 0; i < WREGS_COUNT_E1; i++)
8842 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8843 regdump_len += wreg_addrs_e1[i].size *
8844 (1 + wreg_addrs_e1[i].read_regs_count);
8847 for (i = 0; i < REGS_COUNT; i++)
8848 if (IS_E1H_ONLINE(reg_addrs[i].info))
8849 regdump_len += reg_addrs[i].size;
8851 for (i = 0; i < WREGS_COUNT_E1H; i++)
8852 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8853 regdump_len += wreg_addrs_e1h[i].size *
8854 (1 + wreg_addrs_e1h[i].read_regs_count);
8857 regdump_len += sizeof(struct dump_hdr);
8862 static void bnx2x_get_regs(struct net_device *dev,
8863 struct ethtool_regs *regs, void *_p)
8866 struct bnx2x *bp = netdev_priv(dev);
8867 struct dump_hdr dump_hdr = {0};
8870 memset(p, 0, regs->len);
8872 if (!netif_running(bp->dev))
8875 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8876 dump_hdr.dump_sign = dump_sign_all;
8877 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8878 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8879 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8880 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8881 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8883 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8884 p += dump_hdr.hdr_size + 1;
8886 if (CHIP_IS_E1(bp)) {
8887 for (i = 0; i < REGS_COUNT; i++)
8888 if (IS_E1_ONLINE(reg_addrs[i].info))
8889 for (j = 0; j < reg_addrs[i].size; j++)
8891 reg_addrs[i].addr + j*4);
8894 for (i = 0; i < REGS_COUNT; i++)
8895 if (IS_E1H_ONLINE(reg_addrs[i].info))
8896 for (j = 0; j < reg_addrs[i].size; j++)
8898 reg_addrs[i].addr + j*4);
8902 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8904 struct bnx2x *bp = netdev_priv(dev);
8906 if (bp->flags & NO_WOL_FLAG) {
8910 wol->supported = WAKE_MAGIC;
8912 wol->wolopts = WAKE_MAGIC;
8916 memset(&wol->sopass, 0, sizeof(wol->sopass));
8919 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8921 struct bnx2x *bp = netdev_priv(dev);
8923 if (wol->wolopts & ~WAKE_MAGIC)
8926 if (wol->wolopts & WAKE_MAGIC) {
8927 if (bp->flags & NO_WOL_FLAG)
8937 static u32 bnx2x_get_msglevel(struct net_device *dev)
8939 struct bnx2x *bp = netdev_priv(dev);
8941 return bp->msglevel;
8944 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8946 struct bnx2x *bp = netdev_priv(dev);
8948 if (capable(CAP_NET_ADMIN))
8949 bp->msglevel = level;
8952 static int bnx2x_nway_reset(struct net_device *dev)
8954 struct bnx2x *bp = netdev_priv(dev);
8959 if (netif_running(dev)) {
8960 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8968 bnx2x_get_link(struct net_device *dev)
8970 struct bnx2x *bp = netdev_priv(dev);
8972 return bp->link_vars.link_up;
8975 static int bnx2x_get_eeprom_len(struct net_device *dev)
8977 struct bnx2x *bp = netdev_priv(dev);
8979 return bp->common.flash_size;
8982 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8984 int port = BP_PORT(bp);
8988 /* adjust timeout for emulation/FPGA */
8989 count = NVRAM_TIMEOUT_COUNT;
8990 if (CHIP_REV_IS_SLOW(bp))
8993 /* request access to nvram interface */
8994 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8995 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8997 for (i = 0; i < count*10; i++) {
8998 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8999 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9005 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9006 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9013 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9015 int port = BP_PORT(bp);
9019 /* adjust timeout for emulation/FPGA */
9020 count = NVRAM_TIMEOUT_COUNT;
9021 if (CHIP_REV_IS_SLOW(bp))
9024 /* relinquish nvram interface */
9025 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9026 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9028 for (i = 0; i < count*10; i++) {
9029 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9030 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9036 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9037 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9044 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9048 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9050 /* enable both bits, even on read */
9051 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9052 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9053 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9056 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9060 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9062 /* disable both bits, even after read */
9063 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9064 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9065 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9068 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9074 /* build the command word */
9075 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9077 /* need to clear DONE bit separately */
9078 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9080 /* address of the NVRAM to read from */
9081 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9082 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9084 /* issue a read command */
9085 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9087 /* adjust timeout for emulation/FPGA */
9088 count = NVRAM_TIMEOUT_COUNT;
9089 if (CHIP_REV_IS_SLOW(bp))
9092 /* wait for completion */
9095 for (i = 0; i < count; i++) {
9097 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9099 if (val & MCPR_NVM_COMMAND_DONE) {
9100 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9101 /* we read nvram data in cpu order
9102 * but ethtool sees it as an array of bytes;
9103 * converting to big-endian preserves the device byte order */
9104 *ret_val = cpu_to_be32(val);
9113 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9120 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9122 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9127 if (offset + buf_size > bp->common.flash_size) {
9128 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9129 " buf_size (0x%x) > flash_size (0x%x)\n",
9130 offset, buf_size, bp->common.flash_size);
9134 /* request access to nvram interface */
9135 rc = bnx2x_acquire_nvram_lock(bp);
9139 /* enable access to nvram interface */
9140 bnx2x_enable_nvram_access(bp);
9142 /* read the first word(s) */
9143 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9144 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9145 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9146 memcpy(ret_buf, &val, 4);
9148 /* advance to the next dword */
9149 offset += sizeof(u32);
9150 ret_buf += sizeof(u32);
9151 buf_size -= sizeof(u32);
9156 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9157 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9158 memcpy(ret_buf, &val, 4);
9161 /* disable access to nvram interface */
9162 bnx2x_disable_nvram_access(bp);
9163 bnx2x_release_nvram_lock(bp);
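
/* Editor's sketch of the read loop's framing above: FIRST on the
 * first dword, LAST on the final one, and each 32-bit value converted
 * to big-endian so the returned byte array matches flash order.
 * nvram_rd32() is a made-up stand-in for the MCP register sequence,
 * and htonl() stands in for cpu_to_be32() in userspace. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

#define CMD_FIRST 0x1
#define CMD_LAST  0x2

static uint32_t nvram_rd32(uint32_t offset, int flags)
{
	(void)flags;
	return offset * 0x01010101u;	/* fake flash content */
}

int main(void)
{
	uint8_t buf[16];
	uint32_t offset = 0, left = sizeof(buf), val;
	int flags = CMD_FIRST;

	while (left > 0) {
		if (left == sizeof(uint32_t))
			flags |= CMD_LAST;	/* close the burst */
		val = htonl(nvram_rd32(offset, flags));
		memcpy(buf + offset, &val, 4);
		offset += 4;
		left -= 4;
		flags = 0;
	}
	printf("read %zu bytes, first byte 0x%02x\n", sizeof(buf), buf[0]);
	return 0;
}
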
9168 static int bnx2x_get_eeprom(struct net_device *dev,
9169 struct ethtool_eeprom *eeprom, u8 *eebuf)
9171 struct bnx2x *bp = netdev_priv(dev);
9174 if (!netif_running(dev))
9177 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9178 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9179 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9180 eeprom->len, eeprom->len);
9182 /* parameters already validated in ethtool_get_eeprom */
9184 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9189 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9194 /* build the command word */
9195 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9197 /* need to clear DONE bit separately */
9198 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9200 /* write the data */
9201 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9203 /* address of the NVRAM to write to */
9204 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9205 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9207 /* issue the write command */
9208 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9210 /* adjust timeout for emulation/FPGA */
9211 count = NVRAM_TIMEOUT_COUNT;
9212 if (CHIP_REV_IS_SLOW(bp))
9215 /* wait for completion */
9217 for (i = 0; i < count; i++) {
9219 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9220 if (val & MCPR_NVM_COMMAND_DONE) {
9229 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
9231 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9239 if (offset + buf_size > bp->common.flash_size) {
9240 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9241 " buf_size (0x%x) > flash_size (0x%x)\n",
9242 offset, buf_size, bp->common.flash_size);
9246 /* request access to nvram interface */
9247 rc = bnx2x_acquire_nvram_lock(bp);
9251 /* enable access to nvram interface */
9252 bnx2x_enable_nvram_access(bp);
9254 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9255 align_offset = (offset & ~0x03);
9256 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9259 val &= ~(0xff << BYTE_OFFSET(offset));
9260 val |= (*data_buf << BYTE_OFFSET(offset));
9262 /* nvram data is returned as an array of bytes
9263 * convert it back to cpu order */
9264 val = be32_to_cpu(val);
9266 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9270 /* disable access to nvram interface */
9271 bnx2x_disable_nvram_access(bp);
9272 bnx2x_release_nvram_lock(bp);
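
/* Editor's sketch of the single-byte write above: read the aligned
 * dword, splice one byte in at BYTE_OFFSET(offset), write it back.
 * Standalone simplification -- the driver additionally converts the
 * be32 read value back to cpu order first; here the "flash" is just
 * a local dword. */
#include <stdio.h>
#include <stdint.h>

#define BYTE_OFFSET(offset) (8 * ((offset) & 0x03))

int main(void)
{
	uint32_t flash_dword = 0xaabbccdd;	/* dword at aligned offset */
	uint32_t offset = 0x102;		/* byte we want to change */
	uint8_t data = 0x5a;

	flash_dword &= ~(0xffu << BYTE_OFFSET(offset));
	flash_dword |= (uint32_t)data << BYTE_OFFSET(offset);

	printf("0x%08x\n", flash_dword);	/* 0xaa5accdd */
	return 0;
}
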
9277 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9285 if (buf_size == 1) /* ethtool */
9286 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9288 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9290 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9295 if (offset + buf_size > bp->common.flash_size) {
9296 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9297 " buf_size (0x%x) > flash_size (0x%x)\n",
9298 offset, buf_size, bp->common.flash_size);
9302 /* request access to nvram interface */
9303 rc = bnx2x_acquire_nvram_lock(bp);
9307 /* enable access to nvram interface */
9308 bnx2x_enable_nvram_access(bp);
9311 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9312 while ((written_so_far < buf_size) && (rc == 0)) {
9313 if (written_so_far == (buf_size - sizeof(u32)))
9314 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9315 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9316 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9317 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9318 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9320 memcpy(&val, data_buf, 4);
9322 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9324 /* advance to the next dword */
9325 offset += sizeof(u32);
9326 data_buf += sizeof(u32);
9327 written_so_far += sizeof(u32);
9331 /* disable access to nvram interface */
9332 bnx2x_disable_nvram_access(bp);
9333 bnx2x_release_nvram_lock(bp);
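
/* Editor's sketch of the FIRST/LAST sequencing in the multi-dword
 * write loop above: LAST closes a burst at the end of the buffer or
 * at an NVRAM page boundary, and FIRST reopens the next one; middle
 * dwords carry no flags. The 16-byte page size is invented to keep
 * the trace short. */
#include <stdio.h>
#include <stdint.h>

#define NVRAM_PAGE_SIZE 16	/* illustrative only */
#define CMD_FIRST 0x1
#define CMD_LAST  0x2

int main(void)
{
	uint32_t offset = 8, buf_size = 24, written = 0;
	int cmd_flags = CMD_FIRST;

	while (written < buf_size) {
		if (written == buf_size - 4)
			cmd_flags |= CMD_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= CMD_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= CMD_FIRST;

		printf("dword @%2u flags:%s%s\n", (unsigned)offset,
		       (cmd_flags & CMD_FIRST) ? " FIRST" : "",
		       (cmd_flags & CMD_LAST) ? " LAST" : "");

		offset += 4;
		written += 4;
		cmd_flags = 0;
	}
	return 0;
}
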
9338 static int bnx2x_set_eeprom(struct net_device *dev,
9339 struct ethtool_eeprom *eeprom, u8 *eebuf)
9341 struct bnx2x *bp = netdev_priv(dev);
9344 if (!netif_running(dev))
9347 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9348 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9349 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9350 eeprom->len, eeprom->len);
9352 /* parameters already validated in ethtool_set_eeprom */
9354 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
9355 if (eeprom->magic == 0x00504859)
9358 bnx2x_acquire_phy_lock(bp);
9359 rc = bnx2x_flash_download(bp, BP_PORT(bp),
9360 bp->link_params.ext_phy_config,
9361 (bp->state != BNX2X_STATE_CLOSED),
9362 eebuf, eeprom->len);
9363 if ((bp->state == BNX2X_STATE_OPEN) ||
9364 (bp->state == BNX2X_STATE_DISABLED)) {
9365 rc |= bnx2x_link_reset(&bp->link_params,
9367 rc |= bnx2x_phy_init(&bp->link_params,
9370 bnx2x_release_phy_lock(bp);
9372 } else /* Only the PMF can access the PHY */
9375 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9380 static int bnx2x_get_coalesce(struct net_device *dev,
9381 struct ethtool_coalesce *coal)
9383 struct bnx2x *bp = netdev_priv(dev);
9385 memset(coal, 0, sizeof(struct ethtool_coalesce));
9387 coal->rx_coalesce_usecs = bp->rx_ticks;
9388 coal->tx_coalesce_usecs = bp->tx_ticks;
9393 #define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9394 static int bnx2x_set_coalesce(struct net_device *dev,
9395 struct ethtool_coalesce *coal)
9397 struct bnx2x *bp = netdev_priv(dev);
9399 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9400 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9401 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9403 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9404 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9405 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9407 if (netif_running(dev))
9408 bnx2x_update_coalesce(bp);
9413 static void bnx2x_get_ringparam(struct net_device *dev,
9414 struct ethtool_ringparam *ering)
9416 struct bnx2x *bp = netdev_priv(dev);
9418 ering->rx_max_pending = MAX_RX_AVAIL;
9419 ering->rx_mini_max_pending = 0;
9420 ering->rx_jumbo_max_pending = 0;
9422 ering->rx_pending = bp->rx_ring_size;
9423 ering->rx_mini_pending = 0;
9424 ering->rx_jumbo_pending = 0;
9426 ering->tx_max_pending = MAX_TX_AVAIL;
9427 ering->tx_pending = bp->tx_ring_size;
9430 static int bnx2x_set_ringparam(struct net_device *dev,
9431 struct ethtool_ringparam *ering)
9433 struct bnx2x *bp = netdev_priv(dev);
9436 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9437 (ering->tx_pending > MAX_TX_AVAIL) ||
9438 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9441 bp->rx_ring_size = ering->rx_pending;
9442 bp->tx_ring_size = ering->tx_pending;
9444 if (netif_running(dev)) {
9445 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9446 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9452 static void bnx2x_get_pauseparam(struct net_device *dev,
9453 struct ethtool_pauseparam *epause)
9455 struct bnx2x *bp = netdev_priv(dev);
9457 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9458 BNX2X_FLOW_CTRL_AUTO) &&
9459 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9461 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9462 BNX2X_FLOW_CTRL_RX);
9463 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9464 BNX2X_FLOW_CTRL_TX);
9466 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9467 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9468 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9471 static int bnx2x_set_pauseparam(struct net_device *dev,
9472 struct ethtool_pauseparam *epause)
9474 struct bnx2x *bp = netdev_priv(dev);
9479 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9480 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9481 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9483 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9485 if (epause->rx_pause)
9486 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9488 if (epause->tx_pause)
9489 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9491 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9492 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9494 if (epause->autoneg) {
9495 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9496 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9500 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9501 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9505 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9507 if (netif_running(dev)) {
9508 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9515 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9517 struct bnx2x *bp = netdev_priv(dev);
9521 /* TPA requires Rx CSUM offloading */
9522 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9523 if (!(dev->features & NETIF_F_LRO)) {
9524 dev->features |= NETIF_F_LRO;
9525 bp->flags |= TPA_ENABLE_FLAG;
9529 } else if (dev->features & NETIF_F_LRO) {
9530 dev->features &= ~NETIF_F_LRO;
9531 bp->flags &= ~TPA_ENABLE_FLAG;
9535 if (changed && netif_running(dev)) {
9536 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9537 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9543 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9545 struct bnx2x *bp = netdev_priv(dev);
9550 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9552 struct bnx2x *bp = netdev_priv(dev);
9557 /* Disable TPA when Rx CSUM is disabled; otherwise all
9558 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9560 u32 flags = ethtool_op_get_flags(dev);
9562 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9568 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9571 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9572 dev->features |= NETIF_F_TSO6;
9574 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9575 dev->features &= ~NETIF_F_TSO6;
9581 static const struct {
9582 char string[ETH_GSTRING_LEN];
9583 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9584 { "register_test (offline)" },
9585 { "memory_test (offline)" },
9586 { "loopback_test (offline)" },
9587 { "nvram_test (online)" },
9588 { "interrupt_test (online)" },
9589 { "link_test (online)" },
9590 { "idle check (online)" }
9593 static int bnx2x_self_test_count(struct net_device *dev)
9595 return BNX2X_NUM_TESTS;
9598 static int bnx2x_test_registers(struct bnx2x *bp)
9600 int idx, i, rc = -ENODEV;
9602 int port = BP_PORT(bp);
9603 static const struct {
9608 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9609 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9610 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9611 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9612 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9613 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9614 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9615 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9616 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9617 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9618 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9619 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9620 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9621 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9622 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9623 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9624 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9625 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9626 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9627 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9628 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9629 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9630 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9631 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9632 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9633 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9634 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9635 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9636 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9637 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9638 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9639 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9640 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9641 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9642 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9643 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9644 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9646 { 0xffffffff, 0, 0x00000000 }
9649 if (!netif_running(bp->dev))
9652 /* Repeat the test twice:
9653 first by writing 0x00000000, then by writing 0xffffffff */
9654 for (idx = 0; idx < 2; idx++) {
9661 wr_val = 0xffffffff;
9665 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9666 u32 offset, mask, save_val, val;
9668 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9669 mask = reg_tbl[i].mask;
9671 save_val = REG_RD(bp, offset);
9673 REG_WR(bp, offset, wr_val);
9674 val = REG_RD(bp, offset);
9676 /* Restore the original register's value */
9677 REG_WR(bp, offset, save_val);
9679 /* verify the value is as expected */
9680 if ((val & mask) != (wr_val & mask))
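
/* Editor's sketch of the self-test idiom above: save the register,
 * write a pattern, read back, restore, and compare only the bits the
 * mask marks as implemented. A plain variable stands in for the
 * device register; the 10 writable bits are invented. */
#include <stdio.h>
#include <stdint.h>

static uint32_t fake_reg = 0x13;	/* pretend device register */

static uint32_t reg_rd(void) { return fake_reg; }
static void reg_wr(uint32_t v) { fake_reg = v & 0x000003ff; }

int main(void)
{
	const uint32_t mask = 0x000003ff;
	const uint32_t patterns[2] = { 0x00000000, 0xffffffff };
	int i, ok = 1;

	for (i = 0; i < 2; i++) {
		uint32_t save = reg_rd();
		uint32_t val;

		reg_wr(patterns[i]);
		val = reg_rd();
		reg_wr(save);		/* restore before judging */

		if ((val & mask) != (patterns[i] & mask))
			ok = 0;
	}
	printf("register test %s\n", ok ? "passed" : "FAILED");
	return 0;
}
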
9691 static int bnx2x_test_memory(struct bnx2x *bp)
9693 int i, j, rc = -ENODEV;
9695 static const struct {
9699 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9700 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9701 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9702 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9703 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9704 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9705 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9709 static const struct {
9715 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9716 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9717 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9718 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9719 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9720 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9722 { NULL, 0xffffffff, 0, 0 }
9725 if (!netif_running(bp->dev))
9728 /* Go through all the memories */
9729 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9730 for (j = 0; j < mem_tbl[i].size; j++)
9731 REG_RD(bp, mem_tbl[i].offset + j*4);
9733 /* Check the parity status */
9734 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9735 val = REG_RD(bp, prty_tbl[i].offset);
9736 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9737 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9739 "%s is 0x%x\n", prty_tbl[i].name, val);
9750 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9755 while (bnx2x_link_test(bp) && cnt--)
9759 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9761 unsigned int pkt_size, num_pkts, i;
9762 struct sk_buff *skb;
9763 unsigned char *packet;
9764 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9765 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
9766 u16 tx_start_idx, tx_idx;
9767 u16 rx_start_idx, rx_idx;
9768 u16 pkt_prod, bd_prod;
9769 struct sw_tx_bd *tx_buf;
9770 struct eth_tx_start_bd *tx_start_bd;
9771 struct eth_tx_parse_bd *pbd = NULL;
9773 union eth_rx_cqe *cqe;
9775 struct sw_rx_bd *rx_buf;
9779 /* check the loopback mode */
9780 switch (loopback_mode) {
9781 case BNX2X_PHY_LOOPBACK:
9782 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9785 case BNX2X_MAC_LOOPBACK:
9786 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9787 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9793 /* prepare the loopback packet */
9794 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9795 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9796 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9799 goto test_loopback_exit;
9801 packet = skb_put(skb, pkt_size);
9802 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9803 memset(packet + ETH_ALEN, 0, ETH_ALEN);
9804 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
9805 for (i = ETH_HLEN; i < pkt_size; i++)
9806 packet[i] = (unsigned char) (i & 0xff);
9808 /* send the loopback packet */
9810 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9811 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
9813 pkt_prod = fp_tx->tx_pkt_prod++;
9814 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
9815 tx_buf->first_bd = fp_tx->tx_bd_prod;
9819 bd_prod = TX_BD(fp_tx->tx_bd_prod);
9820 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
9821 mapping = pci_map_single(bp->pdev, skb->data,
9822 skb_headlen(skb), PCI_DMA_TODEVICE);
9823 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9824 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9825 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
9826 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9827 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
9828 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9829 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
9830 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9832 /* turn on parsing and get a BD */
9833 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9834 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
9836 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9840 fp_tx->tx_db.data.prod += 2;
9842 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
9847 fp_tx->tx_bd_prod += 2; /* start + pbd */
9848 bp->dev->trans_start = jiffies;
9852 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9853 if (tx_idx != tx_start_idx + num_pkts)
9854 goto test_loopback_exit;
9856 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
9857 if (rx_idx != rx_start_idx + num_pkts)
9858 goto test_loopback_exit;
9860 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
9861 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9862 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9863 goto test_loopback_rx_exit;
9865 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9866 if (len != pkt_size)
9867 goto test_loopback_rx_exit;
9869 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
9871 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9872 for (i = ETH_HLEN; i < pkt_size; i++)
9873 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9874 goto test_loopback_rx_exit;
9878 test_loopback_rx_exit:
9880 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
9881 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
9882 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
9883 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
9885 /* Update producers */
9886 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
9887 fp_rx->rx_sge_prod);
9890 bp->link_params.loopback_mode = LOOPBACK_NONE;
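
/* Editor's sketch of the loopback payload convention used above:
 * body byte i is (i & 0xff), so the receive side can verify the frame
 * with the same expression. A local buffer stands in for the skb. */
#include <stdio.h>
#include <string.h>

#define ETH_HLEN 14

int main(void)
{
	unsigned char pkt[128];
	int i, ok = 1;

	memset(pkt, 0x77, ETH_HLEN);		/* dummy header */
	for (i = ETH_HLEN; i < (int)sizeof(pkt); i++)
		pkt[i] = (unsigned char)(i & 0xff);

	/* "receive" check, mirroring the rx-side loop above */
	for (i = ETH_HLEN; i < (int)sizeof(pkt); i++)
		if (pkt[i] != (unsigned char)(i & 0xff))
			ok = 0;

	printf("payload %s\n", ok ? "verified" : "corrupt");
	return 0;
}
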
9895 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9899 if (!netif_running(bp->dev))
9900 return BNX2X_LOOPBACK_FAILED;
9902 bnx2x_netif_stop(bp, 1);
9903 bnx2x_acquire_phy_lock(bp);
9905 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9907 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9908 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9911 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9913 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9914 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9917 bnx2x_release_phy_lock(bp);
9918 bnx2x_netif_start(bp);
9923 #define CRC32_RESIDUAL 0xdebb20e3
9925 static int bnx2x_test_nvram(struct bnx2x *bp)
9927 static const struct {
9931 { 0, 0x14 }, /* bootstrap */
9932 { 0x14, 0xec }, /* dir */
9933 { 0x100, 0x350 }, /* manuf_info */
9934 { 0x450, 0xf0 }, /* feature_info */
9935 { 0x640, 0x64 }, /* upgrade_key_info */
9937 { 0x708, 0x70 }, /* manuf_key_info */
9941 __be32 buf[0x350 / 4];
9942 u8 *data = (u8 *)buf;
9946 rc = bnx2x_nvram_read(bp, 0, data, 4);
9948 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9949 goto test_nvram_exit;
9952 magic = be32_to_cpu(buf[0]);
9953 if (magic != 0x669955aa) {
9954 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9956 goto test_nvram_exit;
9959 for (i = 0; nvram_tbl[i].size; i++) {
9961 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9965 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9966 goto test_nvram_exit;
9969 csum = ether_crc_le(nvram_tbl[i].size, data);
9970 if (csum != CRC32_RESIDUAL) {
9972 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9974 goto test_nvram_exit;
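
/* Editor's sketch of why the test above compares against a constant:
 * running a reflected, uninverted CRC32 (what ether_crc_le() computes)
 * over a block that ends with its own stored CRC always leaves the
 * register at the residual 0xdebb20e3. crc32_le() below mimics that
 * bit-by-bit; the payload bytes are arbitrary. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t crc32_le(const uint8_t *p, size_t len)
{
	uint32_t crc = 0xffffffff;	/* same init as ether_crc_le() */
	size_t i;
	int b;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (b = 0; b < 8; b++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1));
	}
	return crc;			/* no final inversion */
}

int main(void)
{
	uint8_t blk[16] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
	uint32_t stored = ~crc32_le(blk, 12);	/* checksum of the payload */
	int i;

	for (i = 0; i < 4; i++)			/* append little-endian */
		blk[12 + i] = (uint8_t)(stored >> (8 * i));

	printf("residual 0x%08x (expect 0xdebb20e3)\n", crc32_le(blk, 16));
	return 0;
}
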
9982 static int bnx2x_test_intr(struct bnx2x *bp)
9984 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9987 if (!netif_running(bp->dev))
9990 config->hdr.length = 0;
9992 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9994 config->hdr.offset = BP_FUNC(bp);
9995 config->hdr.client_id = bp->fp->cl_id;
9996 config->hdr.reserved1 = 0;
9998 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9999 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10000 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10002 bp->set_mac_pending++;
10003 for (i = 0; i < 10; i++) {
10004 if (!bp->set_mac_pending)
10006 msleep_interruptible(10);
10015 static void bnx2x_self_test(struct net_device *dev,
10016 struct ethtool_test *etest, u64 *buf)
10018 struct bnx2x *bp = netdev_priv(dev);
10020 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10022 if (!netif_running(dev))
10025 /* offline tests are not supported in MF mode */
10027 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10029 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10030 int port = BP_PORT(bp);
10034 /* save current value of input enable for TX port IF */
10035 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10036 /* disable input for TX port IF */
10037 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10039 link_up = bp->link_vars.link_up;
10040 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10041 bnx2x_nic_load(bp, LOAD_DIAG);
10042 /* wait until link state is restored */
10043 bnx2x_wait_for_link(bp, link_up);
10045 if (bnx2x_test_registers(bp) != 0) {
10047 etest->flags |= ETH_TEST_FL_FAILED;
10049 if (bnx2x_test_memory(bp) != 0) {
10051 etest->flags |= ETH_TEST_FL_FAILED;
10053 buf[2] = bnx2x_test_loopback(bp, link_up);
10055 etest->flags |= ETH_TEST_FL_FAILED;
10057 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10059 /* restore input for TX port IF */
10060 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10062 bnx2x_nic_load(bp, LOAD_NORMAL);
10063 /* wait until link state is restored */
10064 bnx2x_wait_for_link(bp, link_up);
10066 if (bnx2x_test_nvram(bp) != 0) {
10068 etest->flags |= ETH_TEST_FL_FAILED;
10070 if (bnx2x_test_intr(bp) != 0) {
10072 etest->flags |= ETH_TEST_FL_FAILED;
10075 if (bnx2x_link_test(bp) != 0) {
10077 etest->flags |= ETH_TEST_FL_FAILED;
10080 #ifdef BNX2X_EXTRA_DEBUG
10081 bnx2x_panic_dump(bp);
10085 static const struct {
10088 u8 string[ETH_GSTRING_LEN];
10089 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10090 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10091 { Q_STATS_OFFSET32(error_bytes_received_hi),
10092 8, "[%d]: rx_error_bytes" },
10093 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10094 8, "[%d]: rx_ucast_packets" },
10095 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10096 8, "[%d]: rx_mcast_packets" },
10097 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10098 8, "[%d]: rx_bcast_packets" },
10099 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10100 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10101 4, "[%d]: rx_phy_ip_err_discards"},
10102 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10103 4, "[%d]: rx_skb_alloc_discard" },
10104 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10106 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10107 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10108 8, "[%d]: tx_packets" }
10111 static const struct {
10115 #define STATS_FLAGS_PORT 1
10116 #define STATS_FLAGS_FUNC 2
10117 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10118 u8 string[ETH_GSTRING_LEN];
10119 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10120 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10121 8, STATS_FLAGS_BOTH, "rx_bytes" },
10122 { STATS_OFFSET32(error_bytes_received_hi),
10123 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10124 { STATS_OFFSET32(total_unicast_packets_received_hi),
10125 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10126 { STATS_OFFSET32(total_multicast_packets_received_hi),
10127 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10128 { STATS_OFFSET32(total_broadcast_packets_received_hi),
10129 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10130 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10131 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10132 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10133 8, STATS_FLAGS_PORT, "rx_align_errors" },
10134 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10135 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10136 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10137 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10138 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10139 8, STATS_FLAGS_PORT, "rx_fragments" },
10140 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10141 8, STATS_FLAGS_PORT, "rx_jabbers" },
10142 { STATS_OFFSET32(no_buff_discard_hi),
10143 8, STATS_FLAGS_BOTH, "rx_discards" },
10144 { STATS_OFFSET32(mac_filter_discard),
10145 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10146 { STATS_OFFSET32(xxoverflow_discard),
10147 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10148 { STATS_OFFSET32(brb_drop_hi),
10149 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10150 { STATS_OFFSET32(brb_truncate_hi),
10151 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10152 { STATS_OFFSET32(pause_frames_received_hi),
10153 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10154 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10155 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10156 { STATS_OFFSET32(nig_timer_max),
10157 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10158 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10159 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10160 { STATS_OFFSET32(rx_skb_alloc_failed),
10161 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10162 { STATS_OFFSET32(hw_csum_err),
10163 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10165 { STATS_OFFSET32(total_bytes_transmitted_hi),
10166 8, STATS_FLAGS_BOTH, "tx_bytes" },
10167 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10168 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10169 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10170 8, STATS_FLAGS_BOTH, "tx_packets" },
10171 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10172 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10173 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10174 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10175 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10176 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10177 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10178 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10179 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10180 8, STATS_FLAGS_PORT, "tx_deferred" },
10181 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10182 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10183 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10184 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10185 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10186 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10187 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10188 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10189 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10190 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10191 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10192 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10193 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10194 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10195 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10196 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10197 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10198 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10199 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10200 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10201 { STATS_OFFSET32(pause_frames_sent_hi),
10202 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10205 #define IS_PORT_STAT(i) \
10206 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10207 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10208 #define IS_E1HMF_MODE_STAT(bp) \
10209 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10211 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10213 struct bnx2x *bp = netdev_priv(dev);
10216 switch (stringset) {
10218 if (is_multi(bp)) {
10220 for_each_rx_queue(bp, i) {
10221 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10222 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10223 bnx2x_q_stats_arr[j].string, i);
10224 k += BNX2X_NUM_Q_STATS;
10226 if (IS_E1HMF_MODE_STAT(bp))
10228 for (j = 0; j < BNX2X_NUM_STATS; j++)
10229 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10230 bnx2x_stats_arr[j].string);
10232 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10233 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10235 strcpy(buf + j*ETH_GSTRING_LEN,
10236 bnx2x_stats_arr[i].string);
10243 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10248 static int bnx2x_get_stats_count(struct net_device *dev)
10250 struct bnx2x *bp = netdev_priv(dev);
10253 if (is_multi(bp)) {
10254 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10255 if (!IS_E1HMF_MODE_STAT(bp))
10256 num_stats += BNX2X_NUM_STATS;
10258 if (IS_E1HMF_MODE_STAT(bp)) {
10260 for (i = 0; i < BNX2X_NUM_STATS; i++)
10261 if (IS_FUNC_STAT(i))
10264 num_stats = BNX2X_NUM_STATS;
10270 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10271 struct ethtool_stats *stats, u64 *buf)
10273 struct bnx2x *bp = netdev_priv(dev);
10274 u32 *hw_stats, *offset;
10277 if (is_multi(bp)) {
10279 for_each_rx_queue(bp, i) {
10280 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10281 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10282 if (bnx2x_q_stats_arr[j].size == 0) {
10283 /* skip this counter */
10287 offset = (hw_stats +
10288 bnx2x_q_stats_arr[j].offset);
10289 if (bnx2x_q_stats_arr[j].size == 4) {
10290 /* 4-byte counter */
10291 buf[k + j] = (u64) *offset;
10294 /* 8-byte counter */
10295 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10297 k += BNX2X_NUM_Q_STATS;
10299 if (IS_E1HMF_MODE_STAT(bp))
10301 hw_stats = (u32 *)&bp->eth_stats;
10302 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10303 if (bnx2x_stats_arr[j].size == 0) {
10304 /* skip this counter */
10308 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10309 if (bnx2x_stats_arr[j].size == 4) {
10310 /* 4-byte counter */
10311 buf[k + j] = (u64) *offset;
10314 /* 8-byte counter */
10315 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10318 hw_stats = (u32 *)&bp->eth_stats;
10319 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10320 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10322 if (bnx2x_stats_arr[i].size == 0) {
10323 /* skip this counter */
10328 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10329 if (bnx2x_stats_arr[i].size == 4) {
10330 /* 4-byte counter */
10331 buf[j] = (u64) *offset;
10335 /* 8-byte counter */
10336 buf[j] = HILO_U64(*offset, *(offset + 1));
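
/* Editor's sketch (standalone): the 8-byte counters above are stored
 * as two u32 halves, high word first, and recombined with HILO_U64.
 * The stats values are made up. */
#include <stdio.h>
#include <stdint.h>

#define HILO_U64(hi, lo) ((((uint64_t)(hi)) << 32) + (lo))

int main(void)
{
	uint32_t stats[2] = { 0x00000001, 0x00000002 };	/* hi, lo */
	uint64_t v = HILO_U64(stats[0], stats[1]);

	printf("0x%016llx\n", (unsigned long long)v);
	return 0;	/* prints 0x0000000100000002 */
}
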
10342 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10344 struct bnx2x *bp = netdev_priv(dev);
10345 int port = BP_PORT(bp);
10348 if (!netif_running(dev))
10357 for (i = 0; i < (data * 2); i++) {
10359 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10360 bp->link_params.hw_led_mode,
10361 bp->link_params.chip_id);
10363 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10364 bp->link_params.hw_led_mode,
10365 bp->link_params.chip_id);
10367 msleep_interruptible(500);
10368 if (signal_pending(current))
10372 if (bp->link_vars.link_up)
10373 bnx2x_set_led(bp, port, LED_MODE_OPER,
10374 bp->link_vars.line_speed,
10375 bp->link_params.hw_led_mode,
10376 bp->link_params.chip_id);
10381 static struct ethtool_ops bnx2x_ethtool_ops = {
10382 .get_settings = bnx2x_get_settings,
10383 .set_settings = bnx2x_set_settings,
10384 .get_drvinfo = bnx2x_get_drvinfo,
10385 .get_regs_len = bnx2x_get_regs_len,
10386 .get_regs = bnx2x_get_regs,
10387 .get_wol = bnx2x_get_wol,
10388 .set_wol = bnx2x_set_wol,
10389 .get_msglevel = bnx2x_get_msglevel,
10390 .set_msglevel = bnx2x_set_msglevel,
10391 .nway_reset = bnx2x_nway_reset,
10392 .get_link = bnx2x_get_link,
10393 .get_eeprom_len = bnx2x_get_eeprom_len,
10394 .get_eeprom = bnx2x_get_eeprom,
10395 .set_eeprom = bnx2x_set_eeprom,
10396 .get_coalesce = bnx2x_get_coalesce,
10397 .set_coalesce = bnx2x_set_coalesce,
10398 .get_ringparam = bnx2x_get_ringparam,
10399 .set_ringparam = bnx2x_set_ringparam,
10400 .get_pauseparam = bnx2x_get_pauseparam,
10401 .set_pauseparam = bnx2x_set_pauseparam,
10402 .get_rx_csum = bnx2x_get_rx_csum,
10403 .set_rx_csum = bnx2x_set_rx_csum,
10404 .get_tx_csum = ethtool_op_get_tx_csum,
10405 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10406 .set_flags = bnx2x_set_flags,
10407 .get_flags = ethtool_op_get_flags,
10408 .get_sg = ethtool_op_get_sg,
10409 .set_sg = ethtool_op_set_sg,
10410 .get_tso = ethtool_op_get_tso,
10411 .set_tso = bnx2x_set_tso,
10412 .self_test_count = bnx2x_self_test_count,
10413 .self_test = bnx2x_self_test,
10414 .get_strings = bnx2x_get_strings,
10415 .phys_id = bnx2x_phys_id,
10416 .get_stats_count = bnx2x_get_stats_count,
10417 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10420 /* end of ethtool_ops */
10422 /****************************************************************************
10423 * General service functions
10424 ****************************************************************************/
10426 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10430 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10434 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10435 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10436 PCI_PM_CTRL_PME_STATUS));
10438 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10439 /* delay required during transition out of D3hot */
10444 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10448 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10450 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10453 /* No more memory access after this point until
10454 * device is brought back to D0.
10464 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10468 /* Tell compiler that status block fields can change */
10470 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10471 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10473 return (fp->rx_comp_cons != rx_cons_sb);
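
/* Editor's sketch of the wrap fix above (my reading of the ring
 * layout): the last slot of each completion-queue page is a "next
 * page" link rather than a real entry, so an index whose low bits
 * land on it is bumped past. The 8-entry page is invented. */
#include <stdio.h>
#include <stdint.h>

#define RCQ_DESC_CNT     8			/* entries per page (illustrative) */
#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1)	/* last one is the link */

static uint16_t normalize(uint16_t idx)
{
	if ((idx & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		idx++;
	return idx;
}

int main(void)
{
	uint16_t idx;

	for (idx = 5; idx < 10; idx++)
		printf("%2u -> %2u\n", idx, normalize(idx));
	/* 7 (a link slot) maps to 8; everything else is unchanged */
	return 0;
}
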
10477 * net_device service functions
10480 static int bnx2x_poll(struct napi_struct *napi, int budget)
10482 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10484 struct bnx2x *bp = fp->bp;
10487 #ifdef BNX2X_STOP_ON_ERROR
10488 if (unlikely(bp->panic))
10492 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10493 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10495 bnx2x_update_fpsb_idx(fp);
10497 if (bnx2x_has_rx_work(fp)) {
10498 work_done = bnx2x_rx_int(fp, budget);
10500 /* must not complete if we consumed full budget */
10501 if (work_done >= budget)
10505 /* bnx2x_has_rx_work() reads the status block, so we must
10506 * ensure that the status block indices have actually been read
10507 * (bnx2x_update_fpsb_idx) before this check (bnx2x_has_rx_work);
10508 * otherwise we might write a stale "newer" status block value to IGU.
10509 * Without the rmb, if a DMA arrived right after bnx2x_has_rx_work,
10510 * the memory read in bnx2x_update_fpsb_idx could be postponed
10511 * to just before bnx2x_ack_sb. In that case there would never be
10512 * another interrupt until the next status block update, even though
10513 * work is still pending.
10517 if (!bnx2x_has_rx_work(fp)) {
10518 #ifdef BNX2X_STOP_ON_ERROR
10521 napi_complete(napi);
10523 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10524 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10525 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10526 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10534 /* we split the first BD into headers and data BDs
10535 * to ease the pain of our fellow microcode engineers
10536 * we use one mapping for both BDs
10537 * So far this has only been observed to happen
10538 * in Other Operating Systems(TM)
10540 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10541 struct bnx2x_fastpath *fp,
10542 struct sw_tx_bd *tx_buf,
10543 struct eth_tx_start_bd **tx_bd, u16 hlen,
10544 u16 bd_prod, int nbd)
10546 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10547 struct eth_tx_bd *d_tx_bd;
10548 dma_addr_t mapping;
10549 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10551 /* first fix first BD */
10552 h_tx_bd->nbd = cpu_to_le16(nbd);
10553 h_tx_bd->nbytes = cpu_to_le16(hlen);
10555 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10556 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10557 h_tx_bd->addr_lo, h_tx_bd->nbd);
10559 /* now get a new data BD
10560 * (after the pbd) and fill it */
10561 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10562 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10564 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10565 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10567 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10568 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10569 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10571 /* this marks the BD as one that has no individual mapping */
10572 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10574 DP(NETIF_MSG_TX_QUEUED,
10575 "TSO split data size is %d (%x:%x)\n",
10576 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10579 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
10584 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10587 csum = (u16) ~csum_fold(csum_sub(csum,
10588 csum_partial(t_header - fix, fix, 0)));
10591 csum = (u16) ~csum_fold(csum_add(csum,
10592 csum_partial(t_header, -fix, 0)));
10594 return swab16(csum);
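
/* Editor's sketch of the fixup above: the stack's partial checksum
 * may start `fix` bytes before the transport header, so the driver
 * folds the checksum of those extra bytes out of the running sum
 * (csum_sub + csum_partial in the kernel). Below, a plain 16-bit
 * one's-complement sum stands in for csum_partial()/csum_fold();
 * the buffer contents are arbitrary. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t csum16(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)p[i] << 8 | p[i + 1];
	if (len & 1)
		sum += (uint32_t)p[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	uint8_t buf[20] = {	/* 4 "extra" bytes, then 16 payload bytes */
		0x11, 0x22, 0x33, 0x44,
		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
	int fix = 4;		/* sum started `fix` bytes early */

	uint32_t whole = csum16(buf, sizeof(buf));
	uint32_t extra = csum16(buf, fix);
	uint32_t direct = csum16(buf + fix, sizeof(buf) - fix);
	/* one's-complement subtract: add the complement of `extra` */
	uint32_t derived = whole + (~extra & 0xffff);

	while (derived >> 16)
		derived = (derived & 0xffff) + (derived >> 16);

	printf("direct 0x%04x derived 0x%04x\n", direct, derived);
	return 0;	/* the two sums match */
}
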
10597 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10601 if (skb->ip_summed != CHECKSUM_PARTIAL)
10605 if (skb->protocol == htons(ETH_P_IPV6)) {
10607 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10608 rc |= XMIT_CSUM_TCP;
10612 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10613 rc |= XMIT_CSUM_TCP;
10617 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10620 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10626 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10627 /* check if the packet requires linearization (i.e. is too fragmented);
10628 no need to check fragmentation if page size > 8K (there can be no
10629 violation of the FW restrictions) */
10630 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10635 int first_bd_sz = 0;
10637 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10638 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10640 if (xmit_type & XMIT_GSO) {
10641 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10642 /* Check if LSO packet needs to be copied:
10643 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10644 int wnd_size = MAX_FETCH_BD - 3;
10645 /* Number of windows to check */
10646 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10651 /* Headers length */
10652 hlen = (int)(skb_transport_header(skb) - skb->data) +
10655 /* Amount of data (w/o headers) in the linear part of the SKB */
10656 first_bd_sz = skb_headlen(skb) - hlen;
10658 wnd_sum = first_bd_sz;
10660 /* Calculate the first sum - it's special */
10661 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10663 skb_shinfo(skb)->frags[frag_idx].size;
10665 /* If there was data in the linear part of the skb - check it */
10666 if (first_bd_sz > 0) {
10667 if (unlikely(wnd_sum < lso_mss)) {
10672 wnd_sum -= first_bd_sz;
10675 /* Others are easier: run through the frag list and
10676 check all windows */
10677 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10679 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10681 if (unlikely(wnd_sum < lso_mss)) {
10686 skb_shinfo(skb)->frags[wnd_idx].size;
10689 /* in the non-LSO case, a too fragmented packet should always be linearized
10696 if (unlikely(to_copy))
10697 DP(NETIF_MSG_TX_QUEUED,
10698 "Linearization IS REQUIRED for %s packet. "
10699 "num_frags %d hlen %d first_bd_sz %d\n",
10700 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10701 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
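
/* Editor's sketch of the sliding-window test above: the FW can fetch
 * only a bounded number of BDs per window, so every wnd_size
 * consecutive fragments must together supply at least one MSS of
 * data, or the skb has to be linearized. This standalone version
 * omits the special handling of the linear (first-BD) data that the
 * driver does; fragment sizes are invented. */
#include <stdio.h>

static int needs_linearization(const int *frag, int nfrags,
			       int wnd_size, int mss)
{
	int wnd_sum = 0, i;

	if (nfrags < wnd_size)
		return 0;
	for (i = 0; i < wnd_size; i++)		/* first window */
		wnd_sum += frag[i];
	if (wnd_sum < mss)
		return 1;
	for (i = wnd_size; i < nfrags; i++) {	/* slide the window */
		wnd_sum += frag[i] - frag[i - wnd_size];
		if (wnd_sum < mss)
			return 1;
	}
	return 0;
}

int main(void)
{
	int frags[] = { 1400, 1400, 100, 100, 100, 1400 };
	int n = sizeof(frags) / sizeof(frags[0]);

	/* window [100,100,100] < 1460, so this one must be linearized */
	printf("linearize: %d\n", needs_linearization(frags, n, 3, 1460));
	return 0;
}
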
10707 /* called with netif_tx_lock
10708 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10709 * netif_wake_queue()
10711 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10713 struct bnx2x *bp = netdev_priv(dev);
10714 struct bnx2x_fastpath *fp, *fp_stat;
10715 struct netdev_queue *txq;
10716 struct sw_tx_bd *tx_buf;
10717 struct eth_tx_start_bd *tx_start_bd;
10718 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
10719 struct eth_tx_parse_bd *pbd = NULL;
10720 u16 pkt_prod, bd_prod;
10722 dma_addr_t mapping;
10723 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10726 __le16 pkt_size = 0;
10728 #ifdef BNX2X_STOP_ON_ERROR
10729 if (unlikely(bp->panic))
10730 return NETDEV_TX_BUSY;
10733 fp_index = skb_get_queue_mapping(skb);
10734 txq = netdev_get_tx_queue(dev, fp_index);
10736 fp = &bp->fp[fp_index + bp->num_rx_queues];
10737 fp_stat = &bp->fp[fp_index];
10739 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10740 fp_stat->eth_q_stats.driver_xoff++;
10741 netif_tx_stop_queue(txq);
10742 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10743 return NETDEV_TX_BUSY;
10746 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10747 " gso type %x xmit_type %x\n",
10748 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10749 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10751 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10752 /* First, check if we need to linearize the skb (due to FW
10753 restrictions). No need to check fragmentation if page size > 8K
10754 (there can be no violation of the FW restrictions) */
10755 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10756 /* Statistics of linearization */
10758 if (skb_linearize(skb) != 0) {
10759 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10760 "silently dropping this SKB\n");
10761 dev_kfree_skb_any(skb);
10762 return NETDEV_TX_OK;
10768 Please read carefully. First we use one BD which we mark as start,
10769 then we have a parsing info BD (used for TSO or xsum),
10770 and only then we have the rest of the TSO BDs.
10771 (don't forget to mark the last one as last,
10772 and to unmap only AFTER you write to the BD ...)
10773 And above all, all pdb sizes are in words - NOT DWORDS!
10776 pkt_prod = fp->tx_pkt_prod++;
10777 bd_prod = TX_BD(fp->tx_bd_prod);
10779 /* get a tx_buf and first BD */
10780 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10781 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
10783 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10784 tx_start_bd->general_data = (UNICAST_ADDRESS <<
10785 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
10787 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
10789 /* remember the first BD of the packet */
10790 tx_buf->first_bd = fp->tx_bd_prod;
10794 DP(NETIF_MSG_TX_QUEUED,
10795 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10796 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
10799 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10800 (bp->flags & HW_VLAN_TX_FLAG)) {
10801 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10802 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10805 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

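	/*
	 * Editor's note: bnx2x_csum_fix() is defined earlier in this file;
	 * the sketch below (reproduced from the same driver generation,
	 * treat as illustrative rather than authoritative) shows the idea -
	 * the checksum the stack computed starts at csum_start, so it is
	 * adjusted by the bytes between the transport header and csum_start:
	 *
	 *	static inline u16 bnx2x_csum_fix(unsigned char *t_header,
	 *					 u16 csum, s8 fix)
	 *	{
	 *		__sum16 tsum = (__force __sum16) csum;
	 *
	 *		if (fix > 0)
	 *			tsum = ~csum_fold(csum_sub((__force __wsum) csum,
	 *				csum_partial(t_header - fix, fix, 0)));
	 *		else if (fix < 0)
	 *			tsum = ~csum_fold(csum_add((__force __wsum) csum,
	 *				csum_partial(t_header, -fix, 0)));
	 *
	 *		return swab16(tsum);
	 *	}
	 */
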
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

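	/*
	 * Editor's note: ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN flags that
	 * the pseudo-header checksum above was computed with a zero length
	 * field (note the 0 passed to csum_tcpudp_magic()/csum_ipv6_magic());
	 * the FW adds the real length of each MSS-sized segment it emits.
	 * A minimal IPv4 sketch of the same computation:
	 *
	 *	pseudo = swab16(~csum_tcpudp_magic(iph->saddr, iph->daddr,
	 *					   0, IPPROTO_TCP, 0));
	 */
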
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets always have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp_stat->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

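/*
 * Editor's note - a worked example of the BD accounting in
 * bnx2x_start_xmit() above, assuming a TSO skb with two fragments and no
 * header split: nbd = nr_frags + 2 = 4 (start BD + parse BD + 2 frag BDs);
 * bnx2x_tx_split() adds one more when headers and payload share the head,
 * and the final nbd++ accounts for the next-page link BD when the chain
 * crosses a BD-page boundary (TX_BD_POFF(bd_prod) < nbd).
 */
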
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

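/*
 * Editor's note - an illustrative walk through the E1H multicast hash used
 * above (the MAC value is made up): crc32c_le() is taken over the 6-byte
 * address, and the top CRC byte selects one of 256 filter bits spread over
 * the MC_HASH_SIZE 32-bit registers:
 *
 *	crc    = crc32c_le(0, mclist->dmi_addr, ETH_ALEN); // e.g. 0x9a....
 *	bit    = (crc >> 24) & 0xff;	// 0x9a -> bit 154 of the filter
 *	regidx = bit >> 5;		// register 4
 *	bit   &= 0x1f;			// bit 26 within that register
 */
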
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

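/*
 * Editor's note - a hedged sketch of how the MII ioctls above are typically
 * driven from user space (standard SIOCGMIIREG usage in the style of
 * mii-tool, not bnx2x-specific; error handling omitted):
 *
 *	struct ifreq ifr = { .ifr_name = "eth0" };
 *	struct mii_ioctl_data *mii =
 *		(struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = 1;		// e.g. MII status register
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// result in mii->val_out
 */
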
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u16 *ops_offsets;
	u32 offset, len, num_ops;
	int i;
	const struct firmware *firmware = bp->firmware;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
				    " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

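/*
 * Editor's note: the bounds check above relies on struct bnx2x_fw_file_hdr
 * being laid out as a plain array of {offset, len} section descriptors, so
 * the whole header can be walked generically:
 *
 *	sections = (struct bnx2x_fw_file_section *)fw_hdr;
 *	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++)
 *		// reject the file if offset + len > firmware->size
 */
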
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	u32 i, j, tmp;
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

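/*
 * Editor's note - worked example of the unpacking above: the big-endian
 * dword pair {0x02123456, 0xdeadbeef} in the firmware file becomes
 *
 *	target[i].op       = 0x02;
 *	target[i].offset   = 0x123456;
 *	target[i].raw_data = 0xdeadbeef;
 */
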
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	u16 *target = (u16 *)_target;
	const __be16 *source = (const __be16 *)_source;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + \
		     be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)

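/*
 * Editor's note - for the init_data blob the macro above expands roughly to
 * (sketch, after preprocessing):
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *	bp->init_data = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_data)
 *		goto request_firmware_exit;	// after logging the error
 *	be32_to_cpu_n(bp->firmware->data +
 *		      be32_to_cpu(fw_hdr->init_data.offset),
 *		      (u8 *)bp->init_data, len);
 */
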
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);

	/* STORMs firmware */
	bp->tsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	bp->tsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	bp->usem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	bp->usem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	bp->xsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	bp->xsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	bp->csem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	bp->csem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

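/*
 * Editor's note: the name built above is FW_FILE_PREFIX_E1 (or the E1H
 * prefix) followed by "<major>.<minor>.<revision>.<engineering>.fw", with
 * the four numbers taken from the BCM_5710_FW_*_VERSION constants this
 * driver was compiled against; bnx2x_check_firmware() then verifies that
 * the loaded file carries exactly that version.
 */
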
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
	pci_save_state(pdev);
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
	pci_restore_state(pdev);
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);
	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

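/*
 * Editor's note: during PCI error recovery (EEH/AER) the PCI core invokes
 * these callbacks in order - error_detected() (detach and unload),
 * slot_reset() after the link has been reset (re-enable, restore state),
 * then resume() (recover shmem state and reload the NIC).
 */
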
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);