/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int int_mode;
static int poll;
static int debug;

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/*
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

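/*
 * Note: the two helpers above tunnel GRC register accesses through the
 * PCI config space window (PCICFG_GRC_ADDRESS / PCICFG_GRC_DATA) and then
 * park the window back on PCICFG_VENDOR_ID_OFFSET.  Illustrative use:
 *
 *	bnx2x_reg_wr_ind(bp, addr, val);
 *	val = bnx2x_reg_rd_ind(bp, addr);
 *
 * The DMAE helpers below fall back to these indirect accesses while
 * bp->dmae_ready is not yet set.
 */
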
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

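/*
 * Illustrative usage sketch (not taken verbatim from this driver):
 * "mapping" stands for a dma_addr_t obtained from the PCI DMA API and
 * "dst_grc_addr" for any dword-aligned GRC address.
 *
 *	bnx2x_write_dmae(bp, mapping, dst_grc_addr, 4);
 *
 * The call serializes on dmae_mutex, posts the command via
 * bnx2x_post_dmae() and polls the write-back completion word (wb_comp)
 * until the DMAE engine stores DMAE_COMP_VAL there.
 */
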
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}

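/*
 * Note: these helpers move 64-bit ("wide bus") register values as a
 * hi/lo dword pair through the DMAE engine rather than as two direct
 * 32-bit accesses.  Illustrative round trip:
 *
 *	bnx2x_wb_wr(bp, reg, U64_HI(val), U64_LO(val));
 *	val = bnx2x_wb_rd(bp, reg);
 */
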
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

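/*
 * Note: an IGU ack is a single dword write; sb_id_and_flags packs the
 * status block id, storm id, update-index flag and interrupt mode into
 * one register image.  For example, the MSI-X fastpath handler below
 * disables further interrupts for its status block with:
 *
 *	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
 */
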
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries;
	   it will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

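/*
 * Worked example (illustrative): with 16-bit ring indices, prod = 10 and
 * cons = 65530 give SUB_S16(10, 65530) = 16, i.e. 16 BDs in flight even
 * though the raw difference wrapped.  Adding NUM_TX_RINGS on top keeps
 * the per-page "next" BDs permanently reserved, and the function reports
 * tx_ring_size minus that total as available.
 */
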
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod.
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

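/*
 * Worked example (illustrative): each u64 element of sge_mask tracks 64
 * SGEs, with a set bit meaning "free".  A TPA CQE that consumed SGEs
 * 0..2 clears bits 0..2 of sge_mask[0]; bnx2x_update_sge_prod() then
 * re-arms a whole element only once all of its bits are clear, setting
 * it back to RX_SGE_MASK_ELEM_ONE_MASK and advancing rx_sge_prod in
 * RX_SGE_MASK_ELEM_SZ steps, so the FW never sees a partially recycled
 * mask element.
 */
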
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc a replacement skb for the bin */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
		struct iphdr *iph;
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb_record_rx_queue(skb, queue);

		iph = (struct iphdr *)skb->data;

		/* If there is no Rx VLAN offloading -
		   take VLAN tag into an account */
		if (unlikely(is_not_hwaccel_vlan_cqe))
			iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);

		iph->check = 0;
		iph->check = ip_fast_csum((u8 *)iph, iph->ihl);

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

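/*
 * Note: together, bnx2x_tpa_start()/bnx2x_tpa_stop() implement the TPA
 * (HW LRO) bins: start parks the partially-filled skb in tpa_pool[queue]
 * and re-points the producer BD at a fresh skb; stop unmaps the pooled
 * skb, fixes its IP checksum, attaches the SGE pages as frags via
 * bnx2x_fill_frag_skb() and hands the aggregated packet to the stack,
 * dropping it instead if a replacement skb or page cannot be allocated.
 */
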
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

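/*
 * Note on the register pair used above: writing the resource bit to
 * hw_lock_control_reg + 4 requests the lock (the read back shows whether
 * it was granted), while writing the same bit to hw_lock_control_reg
 * itself releases it.  A typical guarded sequence in this file looks like:
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... touch MISC_REG_GPIO ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */
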
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set
     to the minimum allowed value.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}

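/*
 * Worked example (illustrative): if the four vnics are configured with
 * min rates 0, 2500, 2500 and 5000, the zero entry is bumped to
 * DEF_MIN_RATE and wsum = 5000 + 2500 + 2500 + DEF_MIN_RATE; each vnic's
 * fair share of the port rate is then proportional to
 * vn_min_rate / wsum.
 */
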
static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will
		   occur; the 1.25 coefficient makes the threshold a little
		   bigger than the real time, to compensate for timer
		   inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}

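/*
 * Worked example (illustrative): at port_rate = 10000 Mbps,
 * r_param = 10000/8 = 1250 bytes/usec, so with
 * RS_PERIODIC_TIMEOUT_USEC = 100 (the "100 usec" from the comment
 * above) the rs_threshold becomes 100 * 1250 * 5/4 = 156250 bytes,
 * i.e. 1.25 times the traffic that fits in one rate-shaping period.
 */
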
2167 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2168 u32 wsum, u16 port_rate,
2169 struct cmng_struct_per_port *m_cmng_port)
2171 struct rate_shaping_vars_per_vn m_rs_vn;
2172 struct fairness_vars_per_vn m_fair_vn;
2173 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2174 u16 vn_min_rate, vn_max_rate;
2177 /* If function is hidden - set min and max to zeroes */
2178 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2183 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2184 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2185 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2186 if current min rate is zero - set it to 1.
2187 This is a requirement of the algorithm. */
2188 if ((vn_min_rate == 0) && wsum)
2189 vn_min_rate = DEF_MIN_RATE;
2190 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2191 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2194 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2195 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2197 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2198 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2200 /* global vn counter - maximal Mbps for this vn */
2201 m_rs_vn.vn_counter.rate = vn_max_rate;
2203 /* quota - number of bytes transmitted in this period */
2204 m_rs_vn.vn_counter.quota =
2205 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2207 #ifdef BNX2X_PER_PROT_QOS
2208 /* per protocol counter */
2209 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2210 /* maximal Mbps for this protocol */
2211 m_rs_vn.protocol_counters[protocol].rate =
2212 protocol_max_rate[protocol];
2213 /* the quota in each timer period -
2214 number of bytes transmitted in this period */
2215 m_rs_vn.protocol_counters[protocol].quota =
2216 (u32)(rs_periodic_timeout_usec *
2217 ((u32)m_rs_vn.
2218 protocol_counters[protocol].rate/8));
2219 }
2220 #endif
2223 /* credit for each period of the fairness algorithm:
2224 number of bytes in T_FAIR (the vn share the port rate).
2225 wsum should not be larger than 10000, thus
2226 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2227 m_fair_vn.vn_credit_delta =
2228 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2229 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
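/* Worked example (illustrative): with T_FAIR_COEF = 10^7 (see the port
 * init above) and the maximal wsum of 10000, the per-unit credit is
 * 10^7 / (8 * 10000) = 125 bytes, so a vn with vn_min_rate = 100 gets
 * at least 100 * 125 = 12500 bytes per T_FAIR, unless the
 * fair_threshold * 2 floor is larger.
 */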
2230 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2231 m_fair_vn.vn_credit_delta);
2234 #ifdef BNX2X_PER_PROT_QOS
2236 u32 protocolWeightSum = 0;
2238 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2239 protocolWeightSum +=
2240 drvInit.protocol_min_rate[protocol];
2241 /* per protocol counter -
2242 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2243 if (protocolWeightSum > 0) {
2244 for (protocol = 0;
2245 protocol < NUM_OF_PROTOCOLS; protocol++)
2246 /* credit for each period of the
2247 fairness algorithm - number of bytes in
2248 T_FAIR (the protocol share the vn rate) */
2249 m_fair_vn.protocol_credit_delta[protocol] =
2250 (u32)((vn_min_rate / 8) * t_fair *
2251 protocol_min_rate / protocolWeightSum);
2252 }
2253 }
2254 #endif
2256 /* Store it to internal memory */
2257 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2258 REG_WR(bp, BAR_XSTRORM_INTMEM +
2259 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2260 ((u32 *)(&m_rs_vn))[i]);
2262 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2263 REG_WR(bp, BAR_XSTRORM_INTMEM +
2264 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2265 ((u32 *)(&m_fair_vn))[i]);
2266 }
2268 /* This function is called upon link interrupt */
2269 static void bnx2x_link_attn(struct bnx2x *bp)
2270 {
2271 int vn;
2273 /* Make sure that we are synced with the current statistics */
2274 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2276 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2278 if (bp->link_vars.link_up) {
2280 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2281 struct host_port_stats *pstats;
2283 pstats = bnx2x_sp(bp, port_stats);
2284 /* reset old bmac stats */
2285 memset(&(pstats->mac_stx[0]), 0,
2286 sizeof(struct mac_stx));
2288 if ((bp->state == BNX2X_STATE_OPEN) ||
2289 (bp->state == BNX2X_STATE_DISABLED))
2290 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2293 /* indicate link status */
2294 bnx2x_link_report(bp);
2296 if (IS_E1HMF(bp)) {
2297 int func;
2299 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2300 if (vn == BP_E1HVN(bp))
2301 continue;
2303 func = ((vn << 1) | BP_PORT(bp));
2305 /* Set the attention towards other drivers
2306 on the same port */
2307 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2308 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
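/* func here is the other function sharing this port: with two ports and
 * up to eight E1H functions, func = (vn << 1) | port, e.g. vn 2 on
 * port 1 maps to func 5.
 */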
2312 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2313 struct cmng_struct_per_port m_cmng_port;
2314 u32 wsum;
2315 int port = BP_PORT(bp);
2317 /* Init RATE SHAPING and FAIRNESS contexts */
2318 wsum = bnx2x_calc_vn_wsum(bp);
2319 bnx2x_init_port_minmax(bp, (int)wsum,
2320 bp->link_vars.line_speed,
2321 &m_cmng_port);
2323 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2324 bnx2x_init_vn_minmax(bp, 2*vn + port,
2325 wsum, bp->link_vars.line_speed,
2326 &m_cmng_port);
2327 }
2328 }
2330 static void bnx2x__link_status_update(struct bnx2x *bp)
2332 if (bp->state != BNX2X_STATE_OPEN)
2333 return;
2335 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2337 if (bp->link_vars.link_up)
2338 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2339 else
2340 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2342 /* indicate link status */
2343 bnx2x_link_report(bp);
2344 }
2346 static void bnx2x_pmf_update(struct bnx2x *bp)
2348 int port = BP_PORT(bp);
2349 u32 val;
2351 bp->port.pmf = 1;
2352 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2354 /* enable nig attention */
2355 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2356 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2357 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2359 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2360 }
2366 /*
2367 * General service functions
2368 */
2370 /* the slow path queue is odd since completions arrive on the fastpath ring */
2371 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2372 u32 data_hi, u32 data_lo, int common)
2374 int func = BP_FUNC(bp);
2376 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2377 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2378 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2379 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2380 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2382 #ifdef BNX2X_STOP_ON_ERROR
2383 if (unlikely(bp->panic))
2384 return -EIO;
2385 #endif
2387 spin_lock_bh(&bp->spq_lock);
2389 if (!bp->spq_left) {
2390 BNX2X_ERR("BUG! SPQ ring full!\n");
2391 spin_unlock_bh(&bp->spq_lock);
2392 bnx2x_panic();
2393 return -EBUSY;
2394 }
2396 /* CID needs the port number to be encoded in it */
2397 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2398 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2399 HW_CID(bp, cid)));
2400 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2401 if (common)
2402 bp->spq_prod_bd->hdr.type |=
2403 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2405 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2406 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2410 if (bp->spq_prod_bd == bp->spq_last_bd) {
2411 bp->spq_prod_bd = bp->spq;
2412 bp->spq_prod_idx = 0;
2413 DP(NETIF_MSG_TIMER, "end of spq\n");
2415 } else {
2416 bp->spq_prod_bd++;
2417 bp->spq_prod_idx++;
2418 }
2420 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2421 bp->spq_prod_idx);
2423 spin_unlock_bh(&bp->spq_lock);
2424 return 0;
2425 }
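/* Usage note: bnx2x_storm_stats_post() below is a typical caller - it
 * posts RAMROD_CMD_ID_ETH_STAT_QUERY with the ramrod data split into
 * the data_hi/data_lo 32-bit words and common = 0.
 */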
2427 /* acquire split MCP access lock register */
2428 static int bnx2x_acquire_alr(struct bnx2x *bp)
2429 {
2430 u32 i, j, val;
2431 int rc = 0;
2433 might_sleep();
2434 i = 100;
2435 for (j = 0; j < i*10; j++) {
2436 val = (1UL << 31);
2437 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2438 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2439 if (val & (1L << 31))
2440 break;
2442 msleep(5);
2443 }
2444 if (!(val & (1L << 31))) {
2445 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2446 rc = -EBUSY;
2447 }
2449 return rc;
2450 }
2452 /* release split MCP access lock register */
2453 static void bnx2x_release_alr(struct bnx2x *bp)
2454 {
2455 u32 val = 0;
2457 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2458 }
2460 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2461 {
2462 struct host_def_status_block *def_sb = bp->def_status_blk;
2463 u16 rc = 0;
2465 barrier(); /* status block is written to by the chip */
2466 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2467 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2468 rc |= 1;
2469 }
2470 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2471 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2472 rc |= 2;
2473 }
2474 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2475 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2476 rc |= 4;
2477 }
2478 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2479 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2480 rc |= 8;
2481 }
2482 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2483 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2484 rc |= 16;
2485 }
2486 return rc;
2487 }
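/* The returned bitmask (a sketch based on the reconstruction above)
 * tells bnx2x_sp_task() which indices moved: bit 0 for the attention
 * block (status & 0x1) and bit 1 for the CStorm block (status & 0x2),
 * with the U/X/T storm blocks on the higher bits.
 */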
2489 /*
2490 * slow path service functions
2491 */
2493 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2495 int port = BP_PORT(bp);
2496 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2497 COMMAND_REG_ATTN_BITS_SET);
2498 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2499 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2500 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2501 NIG_REG_MASK_INTERRUPT_PORT0;
2502 u32 aeu_mask;
2504 if (bp->attn_state & asserted)
2505 BNX2X_ERR("IGU ERROR\n");
2507 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2508 aeu_mask = REG_RD(bp, aeu_addr);
2510 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2511 aeu_mask, asserted);
2512 aeu_mask &= ~(asserted & 0xff);
2513 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2515 REG_WR(bp, aeu_addr, aeu_mask);
2516 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2518 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2519 bp->attn_state |= asserted;
2520 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2522 if (asserted & ATTN_HARD_WIRED_MASK) {
2523 if (asserted & ATTN_NIG_FOR_FUNC) {
2525 bnx2x_acquire_phy_lock(bp);
2527 /* save nig interrupt mask */
2528 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2529 REG_WR(bp, nig_int_mask_addr, 0);
2531 bnx2x_link_attn(bp);
2533 /* handle unicore attn? */
2535 if (asserted & ATTN_SW_TIMER_4_FUNC)
2536 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2538 if (asserted & GPIO_2_FUNC)
2539 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2541 if (asserted & GPIO_3_FUNC)
2542 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2544 if (asserted & GPIO_4_FUNC)
2545 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2548 if (asserted & ATTN_GENERAL_ATTN_1) {
2549 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2550 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2552 if (asserted & ATTN_GENERAL_ATTN_2) {
2553 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2554 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2556 if (asserted & ATTN_GENERAL_ATTN_3) {
2557 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2558 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2561 if (asserted & ATTN_GENERAL_ATTN_4) {
2562 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2563 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2565 if (asserted & ATTN_GENERAL_ATTN_5) {
2566 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2567 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2569 if (asserted & ATTN_GENERAL_ATTN_6) {
2570 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2571 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2575 } /* if hardwired */
2577 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2578 asserted, hc_addr);
2579 REG_WR(bp, hc_addr, asserted);
2581 /* now set back the mask */
2582 if (asserted & ATTN_NIG_FOR_FUNC) {
2583 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2584 bnx2x_release_phy_lock(bp);
2588 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2589 {
2590 int port = BP_PORT(bp);
2591 int reg_offset;
2592 u32 val;
2594 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2595 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2597 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2599 val = REG_RD(bp, reg_offset);
2600 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2601 REG_WR(bp, reg_offset, val);
2603 BNX2X_ERR("SPIO5 hw attention\n");
2605 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2606 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2607 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2608 /* Fan failure attention */
2610 /* The PHY reset is controlled by GPIO 1 */
2611 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2612 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2613 /* Low power mode is controlled by GPIO 2 */
2614 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2615 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2616 /* mark the failure */
2617 bp->link_params.ext_phy_config &=
2618 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2619 bp->link_params.ext_phy_config |=
2620 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2621 SHMEM_WR(bp,
2622 dev_info.port_hw_config[port].
2623 external_phy_config,
2624 bp->link_params.ext_phy_config);
2625 /* log the failure */
2626 printk(KERN_ERR PFX "Fan Failure on Network"
2627 " Controller %s has caused the driver to"
2628 " shut down the card to prevent permanent"
2629 " damage. Please contact Dell Support for"
2630 " assistance\n", bp->dev->name);
2638 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2640 val = REG_RD(bp, reg_offset);
2641 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2642 REG_WR(bp, reg_offset, val);
2644 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2645 (attn & HW_INTERRUT_ASSERT_SET_0));
2650 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2651 {
2652 u32 val;
2654 if (attn & BNX2X_DOORQ_ASSERT) {
2656 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2657 BNX2X_ERR("DB hw attention 0x%x\n", val);
2658 /* DORQ discard attention */
2659 if (val & 0x2)
2660 BNX2X_ERR("FATAL error from DORQ\n");
2663 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2665 int port = BP_PORT(bp);
2668 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2669 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2671 val = REG_RD(bp, reg_offset);
2672 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2673 REG_WR(bp, reg_offset, val);
2675 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2676 (attn & HW_INTERRUT_ASSERT_SET_1));
2681 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2682 {
2683 u32 val;
2685 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2687 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2688 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2689 /* CFC error attention */
2690 if (val & 0x2)
2691 BNX2X_ERR("FATAL error from CFC\n");
2694 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2696 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2697 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2698 /* RQ_USDMDP_FIFO_OVERFLOW */
2699 if (val & 0x18000)
2700 BNX2X_ERR("FATAL error from PXP\n");
2703 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2705 int port = BP_PORT(bp);
2708 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2709 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2711 val = REG_RD(bp, reg_offset);
2712 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2713 REG_WR(bp, reg_offset, val);
2715 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2716 (attn & HW_INTERRUT_ASSERT_SET_2));
2721 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2722 {
2723 u32 val;
2725 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2727 if (attn & BNX2X_PMF_LINK_ASSERT) {
2728 int func = BP_FUNC(bp);
2730 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2731 bnx2x__link_status_update(bp);
2732 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2733 DRV_STATUS_PMF)
2734 bnx2x_pmf_update(bp);
2736 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2738 BNX2X_ERR("MC assert!\n");
2739 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2740 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2741 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2742 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2745 } else if (attn & BNX2X_MCP_ASSERT) {
2747 BNX2X_ERR("MCP assert!\n");
2748 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2752 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2755 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2756 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2757 if (attn & BNX2X_GRC_TIMEOUT) {
2758 val = CHIP_IS_E1H(bp) ?
2759 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2760 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2762 if (attn & BNX2X_GRC_RSV) {
2763 val = CHIP_IS_E1H(bp) ?
2764 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2765 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2767 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2771 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2773 struct attn_route attn;
2774 struct attn_route group_mask;
2775 int port = BP_PORT(bp);
2776 int index;
2777 u32 reg_addr;
2778 u32 val;
2779 u32 aeu_mask;
2781 /* need to take HW lock because MCP or other port might also
2782 try to handle this event */
2783 bnx2x_acquire_alr(bp);
2785 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2786 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2787 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2788 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2789 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2790 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2792 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2793 if (deasserted & (1 << index)) {
2794 group_mask = bp->attn_group[index];
2796 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2797 index, group_mask.sig[0], group_mask.sig[1],
2798 group_mask.sig[2], group_mask.sig[3]);
2800 bnx2x_attn_int_deasserted3(bp,
2801 attn.sig[3] & group_mask.sig[3]);
2802 bnx2x_attn_int_deasserted1(bp,
2803 attn.sig[1] & group_mask.sig[1]);
2804 bnx2x_attn_int_deasserted2(bp,
2805 attn.sig[2] & group_mask.sig[2]);
2806 bnx2x_attn_int_deasserted0(bp,
2807 attn.sig[0] & group_mask.sig[0]);
2809 if ((attn.sig[0] & group_mask.sig[0] &
2810 HW_PRTY_ASSERT_SET_0) ||
2811 (attn.sig[1] & group_mask.sig[1] &
2812 HW_PRTY_ASSERT_SET_1) ||
2813 (attn.sig[2] & group_mask.sig[2] &
2814 HW_PRTY_ASSERT_SET_2))
2815 BNX2X_ERR("FATAL HW block parity attention\n");
2819 bnx2x_release_alr(bp);
2821 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2823 val = ~deasserted;
2824 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2825 val, reg_addr);
2826 REG_WR(bp, reg_addr, val);
2828 if (~bp->attn_state & deasserted)
2829 BNX2X_ERR("IGU ERROR\n");
2831 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2832 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2834 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2835 aeu_mask = REG_RD(bp, reg_addr);
2837 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2838 aeu_mask, deasserted);
2839 aeu_mask |= (deasserted & 0xff);
2840 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2842 REG_WR(bp, reg_addr, aeu_mask);
2843 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2845 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2846 bp->attn_state &= ~deasserted;
2847 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2850 static void bnx2x_attn_int(struct bnx2x *bp)
2852 /* read local copy of bits */
2853 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2854 attn_bits);
2855 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2856 attn_bits_ack);
2857 u32 attn_state = bp->attn_state;
2859 /* look for changed bits */
2860 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2861 u32 deasserted = ~attn_bits & attn_ack & attn_state;
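/* Edge detection, per bit: a bit is "asserted" when it is newly set in
 * attn_bits but not yet acked and not in our recorded state; it is
 * "deasserted" when it has cleared in attn_bits while still acked and
 * recorded. The check below flags the inconsistent case where
 * attn_bits == attn_ack but attn_bits != attn_state.
 */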
2864 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2865 attn_bits, attn_ack, asserted, deasserted);
2867 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2868 BNX2X_ERR("BAD attention state\n");
2870 /* handle bits that were raised */
2871 if (asserted)
2872 bnx2x_attn_int_asserted(bp, asserted);
2874 if (deasserted)
2875 bnx2x_attn_int_deasserted(bp, deasserted);
2878 static void bnx2x_sp_task(struct work_struct *work)
2879 {
2880 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2881 u16 status;
2884 /* Return here if interrupt is disabled */
2885 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2886 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2887 return;
2888 }
2890 status = bnx2x_update_dsb_idx(bp);
2891 /* if (status == 0) */
2892 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2894 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2896 /* HW attentions */
2897 if (status & 0x1)
2898 bnx2x_attn_int(bp);
2900 /* CStorm events: query_stats, port delete ramrod */
2901 if (status & 0x2)
2902 bp->stats_pending = 0;
2904 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2905 IGU_INT_NOP, 1);
2906 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2907 IGU_INT_NOP, 1);
2908 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2909 IGU_INT_NOP, 1);
2910 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2911 IGU_INT_NOP, 1);
2912 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2913 IGU_INT_ENABLE, 1);
2917 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2919 struct net_device *dev = dev_instance;
2920 struct bnx2x *bp = netdev_priv(dev);
2922 /* Return here if interrupt is disabled */
2923 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2924 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2925 return IRQ_HANDLED;
2926 }
2928 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2930 #ifdef BNX2X_STOP_ON_ERROR
2931 if (unlikely(bp->panic))
2932 return IRQ_HANDLED;
2933 #endif
2935 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2937 return IRQ_HANDLED;
2938 }
2940 /* end of slow path */
2944 /****************************************************************************
2945 * Macros
2946 ****************************************************************************/
2948 /* sum[hi:lo] += add[hi:lo] */
2949 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2950 do { \
2951 s_lo += a_lo; \
2952 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2953 } while (0)
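/* The carry trick: s_lo is unsigned, so after s_lo += a_lo the sum
 * wrapped iff s_lo < a_lo, e.g. 0xFFFFFFFF + 1 wraps to 0 and 0 < 1
 * propagates the carry into s_hi.
 */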
2955 /* difference = minuend - subtrahend */
2956 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2957 do { \
2958 if (m_lo < s_lo) { \
2959 /* underflow */ \
2960 d_hi = m_hi - s_hi; \
2961 if (d_hi > 0) { \
2962 /* we can 'loan' 1 */ \
2963 d_hi--; \
2964 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2965 } else { \
2966 /* m_hi <= s_hi */ \
2967 d_hi = 0; \
2968 d_lo = 0; \
2969 } \
2970 } else { \
2971 /* m_lo >= s_lo */ \
2972 if (m_hi < s_hi) { \
2973 d_hi = 0; \
2974 d_lo = 0; \
2975 } else { \
2976 /* m_hi >= s_hi */ \
2977 d_hi = m_hi - s_hi; \
2978 d_lo = m_lo - s_lo; \
2979 } \
2980 } \
2981 } while (0)
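/* Worked example: m = {hi 1, lo 5}, s = {hi 0, lo 10}. m_lo < s_lo and
 * d_hi = 1 > 0, so we loan: d_hi becomes 0 and d_lo = 5 +
 * (0xFFFFFFFF - 10) + 1 = 0xFFFFFFFB, matching the true 64-bit result
 * 0x100000005 - 0xA. When the subtrahend is larger the result is
 * clamped to zero rather than allowed to underflow.
 */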
2983 #define UPDATE_STAT64(s, t) \
2984 do { \
2985 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2986 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2987 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2988 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2989 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2990 pstats->mac_stx[1].t##_lo, diff.lo); \
2991 } while (0)
2993 #define UPDATE_STAT64_NIG(s, t) \
2994 do { \
2995 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2996 diff.lo, new->s##_lo, old->s##_lo); \
2997 ADD_64(estats->t##_hi, diff.hi, \
2998 estats->t##_lo, diff.lo); \
2999 } while (0)
3001 /* sum[hi:lo] += add */
3002 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3003 do { \
3004 s_lo += a; \
3005 s_hi += (s_lo < a) ? 1 : 0; \
3006 } while (0)
3008 #define UPDATE_EXTEND_STAT(s) \
3009 do { \
3010 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3011 pstats->mac_stx[1].s##_lo, \
3012 new->s); \
3013 } while (0)
3015 #define UPDATE_EXTEND_TSTAT(s, t) \
3016 do { \
3017 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3018 old_tclient->s = le32_to_cpu(tclient->s); \
3019 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3020 } while (0)
3022 #define UPDATE_EXTEND_XSTAT(s, t) \
3023 do { \
3024 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3025 old_xclient->s = le32_to_cpu(xclient->s); \
3026 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3027 } while (0)
3029 /*
3030 * General service functions
3031 */
3033 static inline long bnx2x_hilo(u32 *hiref)
3034 {
3035 u32 lo = *(hiref + 1);
3036 #if (BITS_PER_LONG == 64)
3037 u32 hi = *hiref;
3039 return HILO_U64(hi, lo);
3040 #else
3041 return lo;
3042 #endif
3043 }
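/* Note: the stats structs store each 64-bit counter as a hi word
 * followed by a lo word, so hiref points at the hi half and hiref + 1
 * at the lo half; HILO_U64 presumably recombines them as
 * ((u64)hi << 32) | lo, while 32-bit kernels settle for the low word.
 */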
3045 /*
3046 * Init service functions
3047 */
3049 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3051 if (!bp->stats_pending) {
3052 struct eth_query_ramrod_data ramrod_data = {0};
3055 ramrod_data.drv_counter = bp->stats_counter++;
3056 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3057 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3059 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3060 ((u32 *)&ramrod_data)[1],
3061 ((u32 *)&ramrod_data)[0], 0);
3063 /* stats ramrod has its own slot on the spq */
3065 bp->stats_pending = 1;
3070 static void bnx2x_stats_init(struct bnx2x *bp)
3072 int port = BP_PORT(bp);
3074 bp->executer_idx = 0;
3075 bp->stats_counter = 0;
3077 /* port stats */
3078 if (!BP_NOMCP(bp))
3079 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3080 else
3081 bp->port.port_stx = 0;
3082 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3084 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3085 bp->port.old_nig_stats.brb_discard =
3086 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3087 bp->port.old_nig_stats.brb_truncate =
3088 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3089 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3090 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3091 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3092 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3094 /* function stats */
3095 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3096 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3097 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3098 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3100 bp->stats_state = STATS_STATE_DISABLED;
3101 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3102 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3105 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3107 struct dmae_command *dmae = &bp->stats_dmae;
3108 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3110 *stats_comp = DMAE_COMP_VAL;
3112 /* loader */
3113 if (bp->executer_idx) {
3114 int loader_idx = PMF_DMAE_C(bp);
3116 memset(dmae, 0, sizeof(struct dmae_command));
3118 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3119 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3120 DMAE_CMD_DST_RESET |
3121 #ifdef __BIG_ENDIAN
3122 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3123 #else
3124 DMAE_CMD_ENDIANITY_DW_SWAP |
3125 #endif
3126 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3128 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3129 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3130 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3131 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3132 sizeof(struct dmae_command) *
3133 (loader_idx + 1)) >> 2;
3134 dmae->dst_addr_hi = 0;
3135 dmae->len = sizeof(struct dmae_command) >> 2;
3138 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3139 dmae->comp_addr_hi = 0;
3140 dmae->comp_val = 1;
3142 *stats_comp = 0;
3143 bnx2x_post_dmae(bp, dmae, loader_idx);
3145 } else if (bp->func_stx) {
3146 *stats_comp = 0;
3147 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3148 }
3149 }
3151 static int bnx2x_stats_comp(struct bnx2x *bp)
3152 {
3153 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3154 int cnt = 10;
3156 might_sleep();
3157 while (*stats_comp != DMAE_COMP_VAL) {
3158 if (!cnt) {
3159 BNX2X_ERR("timeout waiting for stats finished\n");
3160 break;
3161 }
3162 cnt--;
3163 msleep(1);
3164 }
3165 return 1;
3166 }
3168 /*
3169 * Statistics service functions
3170 */
3172 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3174 struct dmae_command *dmae;
3176 int loader_idx = PMF_DMAE_C(bp);
3177 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3180 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3181 BNX2X_ERR("BUG!\n");
3182 return;
3183 }
3185 bp->executer_idx = 0;
3187 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3189 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3190 #ifdef __BIG_ENDIAN
3191 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3192 #else
3193 DMAE_CMD_ENDIANITY_DW_SWAP |
3194 #endif
3195 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3196 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3198 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3199 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3200 dmae->src_addr_lo = bp->port.port_stx >> 2;
3201 dmae->src_addr_hi = 0;
3202 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3203 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3204 dmae->len = DMAE_LEN32_RD_MAX;
3205 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3206 dmae->comp_addr_hi = 0;
3207 dmae->comp_val = 1;
3209 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3210 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3211 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3212 dmae->src_addr_hi = 0;
3213 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3214 DMAE_LEN32_RD_MAX * 4);
3215 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3216 DMAE_LEN32_RD_MAX * 4);
3217 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3218 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3219 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3220 dmae->comp_val = DMAE_COMP_VAL;
3223 bnx2x_hw_stats_post(bp);
3224 bnx2x_stats_comp(bp);
3227 static void bnx2x_port_stats_init(struct bnx2x *bp)
3229 struct dmae_command *dmae;
3230 int port = BP_PORT(bp);
3231 int vn = BP_E1HVN(bp);
3233 int loader_idx = PMF_DMAE_C(bp);
3235 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3238 if (!bp->link_vars.link_up || !bp->port.pmf) {
3239 BNX2X_ERR("BUG!\n");
3240 return;
3241 }
3243 bp->executer_idx = 0;
3246 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3247 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3248 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3249 #ifdef __BIG_ENDIAN
3250 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3251 #else
3252 DMAE_CMD_ENDIANITY_DW_SWAP |
3253 #endif
3254 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3255 (vn << DMAE_CMD_E1HVN_SHIFT));
3257 if (bp->port.port_stx) {
3259 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3260 dmae->opcode = opcode;
3261 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3262 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3263 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3264 dmae->dst_addr_hi = 0;
3265 dmae->len = sizeof(struct host_port_stats) >> 2;
3266 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3267 dmae->comp_addr_hi = 0;
3268 dmae->comp_val = 1;
3269 }
3271 if (bp->func_stx) {
3273 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3274 dmae->opcode = opcode;
3275 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3276 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3277 dmae->dst_addr_lo = bp->func_stx >> 2;
3278 dmae->dst_addr_hi = 0;
3279 dmae->len = sizeof(struct host_func_stats) >> 2;
3280 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3281 dmae->comp_addr_hi = 0;
3282 dmae->comp_val = 1;
3283 }
3285 /* MAC */
3286 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3287 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3288 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3289 #ifdef __BIG_ENDIAN
3290 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3291 #else
3292 DMAE_CMD_ENDIANITY_DW_SWAP |
3293 #endif
3294 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3295 (vn << DMAE_CMD_E1HVN_SHIFT));
3297 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3299 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3300 NIG_REG_INGRESS_BMAC0_MEM);
3302 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3303 BIGMAC_REGISTER_TX_STAT_GTBYT */
3304 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3305 dmae->opcode = opcode;
3306 dmae->src_addr_lo = (mac_addr +
3307 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3308 dmae->src_addr_hi = 0;
3309 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3310 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3311 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3312 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3313 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3314 dmae->comp_addr_hi = 0;
3315 dmae->comp_val = 1;
3317 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3318 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3319 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3320 dmae->opcode = opcode;
3321 dmae->src_addr_lo = (mac_addr +
3322 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3323 dmae->src_addr_hi = 0;
3324 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3325 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3326 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3327 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3328 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3329 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3330 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3331 dmae->comp_addr_hi = 0;
3332 dmae->comp_val = 1;
3334 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3336 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3338 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3339 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3340 dmae->opcode = opcode;
3341 dmae->src_addr_lo = (mac_addr +
3342 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3343 dmae->src_addr_hi = 0;
3344 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3345 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3346 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3347 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3348 dmae->comp_addr_hi = 0;
3349 dmae->comp_val = 1;
3351 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3352 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3353 dmae->opcode = opcode;
3354 dmae->src_addr_lo = (mac_addr +
3355 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3356 dmae->src_addr_hi = 0;
3357 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3358 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3359 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3360 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3361 dmae->len = 1;
3362 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363 dmae->comp_addr_hi = 0;
3364 dmae->comp_val = 1;
3366 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3367 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3368 dmae->opcode = opcode;
3369 dmae->src_addr_lo = (mac_addr +
3370 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3371 dmae->src_addr_hi = 0;
3372 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3373 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3374 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3375 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3376 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3377 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3378 dmae->comp_addr_hi = 0;
3379 dmae->comp_val = 1;
3380 }
3382 /* NIG */
3383 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3384 dmae->opcode = opcode;
3385 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3386 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3387 dmae->src_addr_hi = 0;
3388 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3389 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3390 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3391 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3392 dmae->comp_addr_hi = 0;
3393 dmae->comp_val = 1;
3395 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3396 dmae->opcode = opcode;
3397 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3398 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3399 dmae->src_addr_hi = 0;
3400 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3401 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3402 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3403 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3404 dmae->len = (2*sizeof(u32)) >> 2;
3405 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3406 dmae->comp_addr_hi = 0;
3407 dmae->comp_val = 1;
3409 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3410 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3411 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3412 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3413 #ifdef __BIG_ENDIAN
3414 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3415 #else
3416 DMAE_CMD_ENDIANITY_DW_SWAP |
3417 #endif
3418 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3419 (vn << DMAE_CMD_E1HVN_SHIFT));
3420 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3421 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3422 dmae->src_addr_hi = 0;
3423 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3424 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3425 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3426 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3427 dmae->len = (2*sizeof(u32)) >> 2;
3428 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3429 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3430 dmae->comp_val = DMAE_COMP_VAL;
3432 *stats_comp = 0;
3433 }
3435 static void bnx2x_func_stats_init(struct bnx2x *bp)
3437 struct dmae_command *dmae = &bp->stats_dmae;
3438 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3441 if (!bp->func_stx) {
3442 BNX2X_ERR("BUG!\n");
3443 return;
3444 }
3446 bp->executer_idx = 0;
3447 memset(dmae, 0, sizeof(struct dmae_command));
3449 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3450 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3451 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3452 #ifdef __BIG_ENDIAN
3453 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3454 #else
3455 DMAE_CMD_ENDIANITY_DW_SWAP |
3456 #endif
3457 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3458 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3459 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3460 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3461 dmae->dst_addr_lo = bp->func_stx >> 2;
3462 dmae->dst_addr_hi = 0;
3463 dmae->len = sizeof(struct host_func_stats) >> 2;
3464 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3465 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3466 dmae->comp_val = DMAE_COMP_VAL;
3468 *stats_comp = 0;
3469 }
3471 static void bnx2x_stats_start(struct bnx2x *bp)
3472 {
3473 if (bp->port.pmf)
3474 bnx2x_port_stats_init(bp);
3476 else if (bp->func_stx)
3477 bnx2x_func_stats_init(bp);
3479 bnx2x_hw_stats_post(bp);
3480 bnx2x_storm_stats_post(bp);
3483 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3485 bnx2x_stats_comp(bp);
3486 bnx2x_stats_pmf_update(bp);
3487 bnx2x_stats_start(bp);
3490 static void bnx2x_stats_restart(struct bnx2x *bp)
3492 bnx2x_stats_comp(bp);
3493 bnx2x_stats_start(bp);
3496 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3498 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3499 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3500 struct regpair diff;
3502 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3503 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3504 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3505 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3506 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3507 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3508 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3509 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3510 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3511 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3512 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3513 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3514 UPDATE_STAT64(tx_stat_gt127,
3515 tx_stat_etherstatspkts65octetsto127octets);
3516 UPDATE_STAT64(tx_stat_gt255,
3517 tx_stat_etherstatspkts128octetsto255octets);
3518 UPDATE_STAT64(tx_stat_gt511,
3519 tx_stat_etherstatspkts256octetsto511octets);
3520 UPDATE_STAT64(tx_stat_gt1023,
3521 tx_stat_etherstatspkts512octetsto1023octets);
3522 UPDATE_STAT64(tx_stat_gt1518,
3523 tx_stat_etherstatspkts1024octetsto1522octets);
3524 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3525 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3526 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3527 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3528 UPDATE_STAT64(tx_stat_gterr,
3529 tx_stat_dot3statsinternalmactransmiterrors);
3530 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3533 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3535 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3536 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3538 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3539 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3540 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3541 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3542 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3543 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3544 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3545 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3546 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3547 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3548 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3549 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3550 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3551 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3552 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3553 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3554 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3555 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3556 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3557 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3558 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3559 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3560 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3561 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3562 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3563 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3564 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3565 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3566 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3567 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3568 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3571 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3573 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3574 struct nig_stats *old = &(bp->port.old_nig_stats);
3575 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3576 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3577 struct regpair diff;
3579 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3580 bnx2x_bmac_stats_update(bp);
3582 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3583 bnx2x_emac_stats_update(bp);
3585 else { /* unreached */
3586 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3587 return -1;
3588 }
3590 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3591 new->brb_discard - old->brb_discard);
3592 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3593 new->brb_truncate - old->brb_truncate);
3595 UPDATE_STAT64_NIG(egress_mac_pkt0,
3596 etherstatspkts1024octetsto1522octets);
3597 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3599 memcpy(old, new, sizeof(struct nig_stats));
3601 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3602 sizeof(struct mac_stx));
3603 estats->brb_drop_hi = pstats->brb_drop_hi;
3604 estats->brb_drop_lo = pstats->brb_drop_lo;
3606 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3608 return 0;
3609 }
3611 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3613 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3614 int cl_id = BP_CL_ID(bp);
3615 struct tstorm_per_port_stats *tport =
3616 &stats->tstorm_common.port_statistics;
3617 struct tstorm_per_client_stats *tclient =
3618 &stats->tstorm_common.client_statistics[cl_id];
3619 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3620 struct xstorm_per_client_stats *xclient =
3621 &stats->xstorm_common.client_statistics[cl_id];
3622 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3623 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3624 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3625 u32 diff;
3627 /* are storm stats valid? */
3628 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3629 bp->stats_counter) {
3630 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3631 " tstorm counter (%d) != stats_counter (%d)\n",
3632 tclient->stats_counter, bp->stats_counter);
3633 return -1;
3634 }
3635 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3636 bp->stats_counter) {
3637 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3638 " xstorm counter (%d) != stats_counter (%d)\n",
3639 xclient->stats_counter, bp->stats_counter);
3640 return -1;
3641 }
3643 fstats->total_bytes_received_hi =
3644 fstats->valid_bytes_received_hi =
3645 le32_to_cpu(tclient->total_rcv_bytes.hi);
3646 fstats->total_bytes_received_lo =
3647 fstats->valid_bytes_received_lo =
3648 le32_to_cpu(tclient->total_rcv_bytes.lo);
3650 estats->error_bytes_received_hi =
3651 le32_to_cpu(tclient->rcv_error_bytes.hi);
3652 estats->error_bytes_received_lo =
3653 le32_to_cpu(tclient->rcv_error_bytes.lo);
3654 ADD_64(estats->error_bytes_received_hi,
3655 estats->rx_stat_ifhcinbadoctets_hi,
3656 estats->error_bytes_received_lo,
3657 estats->rx_stat_ifhcinbadoctets_lo);
3659 ADD_64(fstats->total_bytes_received_hi,
3660 estats->error_bytes_received_hi,
3661 fstats->total_bytes_received_lo,
3662 estats->error_bytes_received_lo);
3664 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3665 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3666 total_multicast_packets_received);
3667 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3668 total_broadcast_packets_received);
3670 fstats->total_bytes_transmitted_hi =
3671 le32_to_cpu(xclient->total_sent_bytes.hi);
3672 fstats->total_bytes_transmitted_lo =
3673 le32_to_cpu(xclient->total_sent_bytes.lo);
3675 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3676 total_unicast_packets_transmitted);
3677 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3678 total_multicast_packets_transmitted);
3679 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3680 total_broadcast_packets_transmitted);
3682 memcpy(estats, &(fstats->total_bytes_received_hi),
3683 sizeof(struct host_func_stats) - 2*sizeof(u32));
3685 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3686 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3687 estats->brb_truncate_discard =
3688 le32_to_cpu(tport->brb_truncate_discard);
3689 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3691 old_tclient->rcv_unicast_bytes.hi =
3692 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3693 old_tclient->rcv_unicast_bytes.lo =
3694 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3695 old_tclient->rcv_broadcast_bytes.hi =
3696 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3697 old_tclient->rcv_broadcast_bytes.lo =
3698 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3699 old_tclient->rcv_multicast_bytes.hi =
3700 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3701 old_tclient->rcv_multicast_bytes.lo =
3702 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3703 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3705 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3706 old_tclient->packets_too_big_discard =
3707 le32_to_cpu(tclient->packets_too_big_discard);
3708 estats->no_buff_discard =
3709 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3710 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3712 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3713 old_xclient->unicast_bytes_sent.hi =
3714 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3715 old_xclient->unicast_bytes_sent.lo =
3716 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3717 old_xclient->multicast_bytes_sent.hi =
3718 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3719 old_xclient->multicast_bytes_sent.lo =
3720 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3721 old_xclient->broadcast_bytes_sent.hi =
3722 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3723 old_xclient->broadcast_bytes_sent.lo =
3724 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3726 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3728 return 0;
3729 }
3731 static void bnx2x_net_stats_update(struct bnx2x *bp)
3733 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3734 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3735 struct net_device_stats *nstats = &bp->dev->stats;
3737 nstats->rx_packets =
3738 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3739 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3740 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3742 nstats->tx_packets =
3743 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3744 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3745 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3747 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3749 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3751 nstats->rx_dropped = old_tclient->checksum_discard +
3752 estats->mac_discard;
3753 nstats->tx_dropped = 0;
3755 nstats->multicast =
3756 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3758 nstats->collisions =
3759 estats->tx_stat_dot3statssinglecollisionframes_lo +
3760 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3761 estats->tx_stat_dot3statslatecollisions_lo +
3762 estats->tx_stat_dot3statsexcessivecollisions_lo;
3764 estats->jabber_packets_received =
3765 old_tclient->packets_too_big_discard +
3766 estats->rx_stat_dot3statsframestoolong_lo;
3768 nstats->rx_length_errors =
3769 estats->rx_stat_etherstatsundersizepkts_lo +
3770 estats->jabber_packets_received;
3771 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3772 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3773 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3774 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3775 nstats->rx_missed_errors = estats->xxoverflow_discard;
3777 nstats->rx_errors = nstats->rx_length_errors +
3778 nstats->rx_over_errors +
3779 nstats->rx_crc_errors +
3780 nstats->rx_frame_errors +
3781 nstats->rx_fifo_errors +
3782 nstats->rx_missed_errors;
3784 nstats->tx_aborted_errors =
3785 estats->tx_stat_dot3statslatecollisions_lo +
3786 estats->tx_stat_dot3statsexcessivecollisions_lo;
3787 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3788 nstats->tx_fifo_errors = 0;
3789 nstats->tx_heartbeat_errors = 0;
3790 nstats->tx_window_errors = 0;
3792 nstats->tx_errors = nstats->tx_aborted_errors +
3793 nstats->tx_carrier_errors;
3796 static void bnx2x_stats_update(struct bnx2x *bp)
3797 {
3798 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3799 int update = 0;
3801 if (*stats_comp != DMAE_COMP_VAL)
3802 return;
3804 if (bp->port.pmf)
3805 update = (bnx2x_hw_stats_update(bp) == 0);
3807 update |= (bnx2x_storm_stats_update(bp) == 0);
3809 if (update)
3810 bnx2x_net_stats_update(bp);
3812 else {
3813 if (bp->stats_pending) {
3814 bp->stats_pending++;
3815 if (bp->stats_pending == 3) {
3816 BNX2X_ERR("stats not updated for 3 times\n");
3817 bnx2x_panic();
3818 return;
3819 }
3820 }
3821 }
3823 if (bp->msglevel & NETIF_MSG_TIMER) {
3824 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3825 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3826 struct net_device_stats *nstats = &bp->dev->stats;
3829 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3830 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3831 " tx pkt (%lx)\n",
3832 bnx2x_tx_avail(bp->fp),
3833 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3834 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3835 " rx pkt (%lx)\n",
3836 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3837 bp->fp->rx_comp_cons),
3838 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3839 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3840 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3841 estats->driver_xoff, estats->brb_drop_lo);
3842 printk(KERN_DEBUG "tstats: checksum_discard %u "
3843 "packets_too_big_discard %u no_buff_discard %u "
3844 "mac_discard %u mac_filter_discard %u "
3845 "xxovrflow_discard %u brb_truncate_discard %u "
3846 "ttl0_discard %u\n",
3847 old_tclient->checksum_discard,
3848 old_tclient->packets_too_big_discard,
3849 old_tclient->no_buff_discard, estats->mac_discard,
3850 estats->mac_filter_discard, estats->xxoverflow_discard,
3851 estats->brb_truncate_discard,
3852 old_tclient->ttl0_discard);
3854 for_each_queue(bp, i) {
3855 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3856 bnx2x_fp(bp, i, tx_pkt),
3857 bnx2x_fp(bp, i, rx_pkt),
3858 bnx2x_fp(bp, i, rx_calls));
3859 }
3860 }
3862 bnx2x_hw_stats_post(bp);
3863 bnx2x_storm_stats_post(bp);
3864 }
3866 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3867 {
3868 struct dmae_command *dmae;
3869 u32 opcode;
3870 int loader_idx = PMF_DMAE_C(bp);
3871 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3873 bp->executer_idx = 0;
3875 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3877 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3878 #ifdef __BIG_ENDIAN
3879 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3880 #else
3881 DMAE_CMD_ENDIANITY_DW_SWAP |
3882 #endif
3883 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3884 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3886 if (bp->port.port_stx) {
3888 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3889 if (bp->func_stx)
3890 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3891 else
3892 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3893 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3894 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3895 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3896 dmae->dst_addr_hi = 0;
3897 dmae->len = sizeof(struct host_port_stats) >> 2;
3898 if (bp->func_stx) {
3899 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3900 dmae->comp_addr_hi = 0;
3901 dmae->comp_val = 1;
3902 } else {
3903 dmae->comp_addr_lo =
3904 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3905 dmae->comp_addr_hi =
3906 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3907 dmae->comp_val = DMAE_COMP_VAL;
3909 *stats_comp = 0;
3910 }
3911 }
3913 if (bp->func_stx) {
3915 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3916 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3917 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3918 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3919 dmae->dst_addr_lo = bp->func_stx >> 2;
3920 dmae->dst_addr_hi = 0;
3921 dmae->len = sizeof(struct host_func_stats) >> 2;
3922 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3923 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3924 dmae->comp_val = DMAE_COMP_VAL;
3926 *stats_comp = 0;
3927 }
3928 }
3930 static void bnx2x_stats_stop(struct bnx2x *bp)
3931 {
3932 int update = 0;
3934 bnx2x_stats_comp(bp);
3936 if (bp->port.pmf)
3937 update = (bnx2x_hw_stats_update(bp) == 0);
3939 update |= (bnx2x_storm_stats_update(bp) == 0);
3941 if (update) {
3942 bnx2x_net_stats_update(bp);
3944 if (bp->port.pmf)
3945 bnx2x_port_stats_stop(bp);
3947 bnx2x_hw_stats_post(bp);
3948 bnx2x_stats_comp(bp);
3949 }
3950 }
3952 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3956 static const struct {
3957 void (*action)(struct bnx2x *bp);
3958 enum bnx2x_stats_state next_state;
3959 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3960 /* state event */
3961 {
3962 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3963 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3964 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3965 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3966 },
3967 {
3968 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3969 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3970 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3971 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3972 }
3973 };
3975 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3977 enum bnx2x_stats_state state = bp->stats_state;
3979 bnx2x_stats_stm[state][event].action(bp);
3980 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3982 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3983 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3984 state, event, bp->stats_state);
3985 }
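/* Example walk through the table above: a LINK_UP event in
 * STATS_STATE_DISABLED runs bnx2x_stats_start() and moves to
 * STATS_STATE_ENABLED; a later STOP event runs bnx2x_stats_stop() and
 * returns to STATS_STATE_DISABLED.
 */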
3987 static void bnx2x_timer(unsigned long data)
3989 struct bnx2x *bp = (struct bnx2x *) data;
3991 if (!netif_running(bp->dev))
3992 return;
3994 if (atomic_read(&bp->intr_sem) != 0)
3995 goto timer_restart;
3997 if (poll) {
3998 struct bnx2x_fastpath *fp = &bp->fp[0];
3999 int rc;
4001 bnx2x_tx_int(fp, 1000);
4002 rc = bnx2x_rx_int(fp, 1000);
4003 }
4005 if (!BP_NOMCP(bp)) {
4006 int func = BP_FUNC(bp);
4007 u32 drv_pulse;
4008 u32 mcp_pulse;
4010 ++bp->fw_drv_pulse_wr_seq;
4011 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4012 /* TBD - add SYSTEM_TIME */
4013 drv_pulse = bp->fw_drv_pulse_wr_seq;
4014 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4016 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4017 MCP_PULSE_SEQ_MASK);
4018 /* The delta between driver pulse and mcp response
4019 * should be 1 (before mcp response) or 0 (after mcp response)
4021 if ((drv_pulse != mcp_pulse) &&
4022 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4023 /* someone lost a heartbeat... */
4024 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4025 drv_pulse, mcp_pulse);
4026 }
4027 }
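/* Example: if the driver just wrote sequence 5, a healthy MCP reads
 * back as 4 (delta 1, response still pending) or 5 (delta 0, already
 * echoed); any other reading means a heartbeat was lost and the error
 * above fires.
 */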
4029 if ((bp->state == BNX2X_STATE_OPEN) ||
4030 (bp->state == BNX2X_STATE_DISABLED))
4031 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4033 timer_restart:
4034 mod_timer(&bp->timer, jiffies + bp->current_interval);
4035 }
4037 /* end of Statistics */
4041 /*
4042 * nic init service functions
4043 */
4045 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4047 int port = BP_PORT(bp);
4049 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4050 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4051 sizeof(struct ustorm_status_block)/4);
4052 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4053 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4054 sizeof(struct cstorm_status_block)/4);
4057 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4058 dma_addr_t mapping, int sb_id)
4060 int port = BP_PORT(bp);
4061 int func = BP_FUNC(bp);
4062 int index;
4063 u64 section;
4065 /* USTORM */
4066 section = ((u64)mapping) + offsetof(struct host_status_block,
4067 u_status_block);
4068 sb->u_status_block.status_block_id = sb_id;
4070 REG_WR(bp, BAR_USTRORM_INTMEM +
4071 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4072 REG_WR(bp, BAR_USTRORM_INTMEM +
4073 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4074 U64_HI(section));
4075 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4076 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4078 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4079 REG_WR16(bp, BAR_USTRORM_INTMEM +
4080 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4082 /* CSTORM */
4083 section = ((u64)mapping) + offsetof(struct host_status_block,
4084 c_status_block);
4085 sb->c_status_block.status_block_id = sb_id;
4087 REG_WR(bp, BAR_CSTRORM_INTMEM +
4088 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4089 REG_WR(bp, BAR_CSTRORM_INTMEM +
4090 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4091 U64_HI(section));
4092 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4093 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4095 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4096 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4097 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4099 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4102 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4104 int func = BP_FUNC(bp);
4106 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4107 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4108 sizeof(struct ustorm_def_status_block)/4);
4109 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4110 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4111 sizeof(struct cstorm_def_status_block)/4);
4112 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4113 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4114 sizeof(struct xstorm_def_status_block)/4);
4115 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4116 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4117 sizeof(struct tstorm_def_status_block)/4);
4120 static void bnx2x_init_def_sb(struct bnx2x *bp,
4121 struct host_def_status_block *def_sb,
4122 dma_addr_t mapping, int sb_id)
4124 int port = BP_PORT(bp);
4125 int func = BP_FUNC(bp);
4126 int index, val, reg_offset;
4127 u64 section;
4129 /* ATTN */
4130 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4131 atten_status_block);
4132 def_sb->atten_status_block.status_block_id = sb_id;
4134 bp->attn_state = 0;
4136 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4137 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4139 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4140 bp->attn_group[index].sig[0] = REG_RD(bp,
4141 reg_offset + 0x10*index);
4142 bp->attn_group[index].sig[1] = REG_RD(bp,
4143 reg_offset + 0x4 + 0x10*index);
4144 bp->attn_group[index].sig[2] = REG_RD(bp,
4145 reg_offset + 0x8 + 0x10*index);
4146 bp->attn_group[index].sig[3] = REG_RD(bp,
4147 reg_offset + 0xc + 0x10*index);
4150 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4151 HC_REG_ATTN_MSG0_ADDR_L);
4153 REG_WR(bp, reg_offset, U64_LO(section));
4154 REG_WR(bp, reg_offset + 4, U64_HI(section));
4156 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4158 val = REG_RD(bp, reg_offset);
4159 val |= sb_id;
4160 REG_WR(bp, reg_offset, val);
4162 /* USTORM */
4163 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4164 u_def_status_block);
4165 def_sb->u_def_status_block.status_block_id = sb_id;
4167 REG_WR(bp, BAR_USTRORM_INTMEM +
4168 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4169 REG_WR(bp, BAR_USTRORM_INTMEM +
4170 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4171 U64_HI(section));
4172 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4173 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4175 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4176 REG_WR16(bp, BAR_USTRORM_INTMEM +
4177 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4180 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4181 c_def_status_block);
4182 def_sb->c_def_status_block.status_block_id = sb_id;
4184 REG_WR(bp, BAR_CSTRORM_INTMEM +
4185 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4186 REG_WR(bp, BAR_CSTRORM_INTMEM +
4187 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4189 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4190 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4192 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4193 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4194 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4197 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4198 t_def_status_block);
4199 def_sb->t_def_status_block.status_block_id = sb_id;
4201 REG_WR(bp, BAR_TSTRORM_INTMEM +
4202 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4203 REG_WR(bp, BAR_TSTRORM_INTMEM +
4204 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4206 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4207 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4209 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4210 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4211 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4214 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4215 x_def_status_block);
4216 def_sb->x_def_status_block.status_block_id = sb_id;
4218 REG_WR(bp, BAR_XSTRORM_INTMEM +
4219 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4220 REG_WR(bp, BAR_XSTRORM_INTMEM +
4221 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4223 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4224 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4226 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4227 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4228 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4230 bp->stats_pending = 0;
4231 bp->set_mac_pending = 0;
4233 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
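/* Note: the default status block carries the attention bits plus one
 * index section per storm (U/C/T/X); the attn_group[] signals read back
 * above are the per-group AEU enable masks consulted later when
 * attentions are serviced.
 */
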
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

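/* Note: bp->rx_ticks/bp->tx_ticks hold the coalescing intervals in
 * microseconds; dividing by 12 converts them to the HC timeout units
 * (the coalescing timer appears to tick in 12us steps on this device),
 * and a value of 0 disables coalescing via the HC_DISABLE flag instead.
 */
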
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu;
	bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD + BNX2X_RX_ALIGN;

	DP(NETIF_MSG_IFUP,
	   "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				bp->eth_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

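/* Note: each ring is an array of pages chained through "next page"
 * elements; the final entries of every page are reserved for that
 * pointer, hence the RX_DESC_CNT*i - 2 / RCQ_DESC_CNT*i - 1 indexing
 * above, and the NEXT_*_IDX() macros skip over the reserved slots.
 */
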
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

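/* Note: the slowpath queue (SPQ) is a single page of BDs; its page base
 * and producer index are mirrored into XSTORM fast memory so the
 * firmware can consume ramrods posted later by bnx2x_sp_post().
 */
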
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

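/* Note: the per-connection context is staged in the slowpath area and
 * pushed to the chip through the CDU; the CDU_RSRVD_VALUE_TYPE_A()
 * values appear to be validation cookies binding the (CID, region,
 * connection type) triplet for the context-validation logic.
 */
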
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			BP_CL_ID(bp) + (i % bp->num_rx_queues));
}

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

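/* Note: the accept/drop fields are per-function bit masks (one bit per
 * logical function ID), so each function programs only its own bit in
 * the TSTORM MAC filter configuration shared by the port.
 */
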
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}
	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* reset xstorm per client statistics */
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}
	/* reset tstorm per client statistics */
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      FP_SB_ID(fp));
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

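/* Note: per RFC 1952 the fixed gzip header is 10 bytes (magic 0x1f 0x8b,
 * method, flags, mtime, XFL, OS), which is why n starts at 10; if the
 * FNAME flag is set the NUL-terminated original file name is skipped
 * too.  Inflate then runs in raw-deflate mode (-MAX_WBITS) since the
 * header was consumed manually.
 */
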
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver;
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics first? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

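/* Note: the test rides the NIG debug-packet interface; with the parser's
 * neighbor blocks gated off, the packet and byte counts observed in the
 * NIG and PRS are fully deterministic.  "factor" only stretches the
 * timeouts for the much slower FPGA and emulation platforms.
 */
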
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bits 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	if (CHIP_REV_IS_SLOW(bp)) {
		/* fix for emulation and FPGA for no pause */
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
	}

	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */
	bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
			     (port ? XCM_PORT1_END : XCM_PORT0_END));

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			     func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */
	/* Port BRB1 comes here */
	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			     port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			     port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			     port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			     port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			     port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			     port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			     port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			     port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		u32 wsum;
		struct cmng_struct_per_port m_cmng_port;
		int vn;

		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* Init RATE SHAPING and FAIRNESS contexts.
		   Initialize as if there is 10G link. */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, 10000, &m_cmng_port);
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and a valid bit (1) is
   added at the 53rd bit;
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0

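/* Example: for a physical address of 0x000123456789A000,
 * ONCHIP_ADDR1() yields 0x3456789A (address bits 43:12) and
 * ONCHIP_ADDR2() yields 0x00100012 (address bits 63:44 plus the valid
 * bit at bit 20 of the high word, i.e. bit 52 of the combined value).
 */
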
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

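/* Note: BNX2X_FREE/BNX2X_PCI_FREE deliberately mirror the allocation
 * macros in bnx2x_alloc_mem() below - vfree() pairs with vmalloc() and
 * pci_free_consistent() with pci_alloc_consistent() - and they NULL the
 * pointers so a partial allocation failure can be unwound safely.
 */
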
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

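/* Note: the MSI-X vector layout is [0] = slowpath/default status block
 * and [1..n] = fastpath queues; the IGU vector numbers are derived from
 * BP_L_ID(), so each function on a port requests a disjoint range.
 */
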
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	if (is_multi(bp))
		printk(KERN_INFO PFX
		       "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset].vector,
		       bp->msix_table[offset + i - 1].vector);
	else
		printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (netif_running(bp->dev)) {
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

6288 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6290 struct mac_configuration_cmd_e1h *config =
6291 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6293 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6294 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6298 /* CAM allocation for E1H
6299 * unicasts: by func number
6300 * multicast: 20+FUNC*20, 20 each
6302 config->hdr.length = 1;
6303 config->hdr.offset = BP_FUNC(bp);
6304 config->hdr.client_id = BP_CL_ID(bp);
6305 config->hdr.reserved1 = 0;
6308 config->config_table[0].msb_mac_addr =
6309 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6310 config->config_table[0].middle_mac_addr =
6311 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6312 config->config_table[0].lsb_mac_addr =
6313 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6314 config->config_table[0].client_id = BP_L_ID(bp);
6315 config->config_table[0].vlan_id = 0;
6316 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6318 config->config_table[0].flags = BP_PORT(bp);
6320 config->config_table[0].flags =
6321 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6323 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6324 (set ? "setting" : "clearing"),
6325 config->config_table[0].msb_mac_addr,
6326 config->config_table[0].middle_mac_addr,
6327 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6329 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6330 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6331 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
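/* bnx2x_wait_ramrod() below busy-waits until *state_p reaches the
 * requested state. The state itself is updated by bnx2x_sp_event()
 * from the completion path, hence the mb(); in poll mode the function
 * services the RX completion rings itself via bnx2x_rx_int(), since
 * the reply may arrive while interrupts are not usable.
 */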
6334 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6335 int *state_p, int poll)
6337 /* can take a while if any port is running */
6340 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6341 poll ? "polling" : "waiting", state, idx);
6346 bnx2x_rx_int(bp->fp, 10);
6347 /* if the index is nonzero, the reply
6348 * for some commands will arrive
6349 * on the non-default queue
6352 bnx2x_rx_int(&bp->fp[idx], 10);
6355 mb(); /* state is changed by bnx2x_sp_event() */
6356 if (*state_p == state)
6363 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6364 poll ? "polling" : "waiting", state, idx);
6365 #ifdef BNX2X_STOP_ON_ERROR
6372 static int bnx2x_setup_leading(struct bnx2x *bp)
6376 /* reset IGU state */
6377 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6380 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6382 /* Wait for completion */
6383 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6388 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6390 struct bnx2x_fastpath *fp = &bp->fp[index];
6392 /* reset IGU state */
6393 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6396 fp->state = BNX2X_FP_STATE_OPENING;
6397 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6400 /* Wait for completion */
6401 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6405 static int bnx2x_poll(struct napi_struct *napi, int budget);
6407 static void bnx2x_set_int_mode(struct bnx2x *bp)
6415 bp->num_rx_queues = num_queues;
6416 bp->num_tx_queues = num_queues;
6418 "set number of queues to %d\n", num_queues);
6423 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6424 num_queues = min_t(u32, num_online_cpus(),
6425 BNX2X_MAX_QUEUES(bp));
6428 bp->num_rx_queues = num_queues;
6429 bp->num_tx_queues = num_queues;
6430 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6431 " number of tx queues to %d\n",
6432 bp->num_rx_queues, bp->num_tx_queues);
6433 /* if we can't use MSI-X we only need one fp,
6434 * so try to enable MSI-X with the requested number of fp's
6435 * and fall back to MSI or legacy INTx with one fp
6437 if (bnx2x_enable_msix(bp)) {
6438 /* failed to enable MSI-X */
6440 bp->num_rx_queues = num_queues;
6441 bp->num_tx_queues = num_queues;
6443 BNX2X_ERR("Multi requested but failed to "
6444 "enable MSI-X; setting number of "
6445 "queues to %d\n", num_queues);
6449 bp->dev->real_num_tx_queues = bp->num_tx_queues;
6452 static void bnx2x_set_rx_mode(struct net_device *dev);
6454 /* must be called with rtnl_lock */
6455 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6459 #ifdef BNX2X_STOP_ON_ERROR
6460 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6461 if (unlikely(bp->panic))
6465 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6467 bnx2x_set_int_mode(bp);
6469 if (bnx2x_alloc_mem(bp))
6472 for_each_rx_queue(bp, i)
6473 bnx2x_fp(bp, i, disable_tpa) =
6474 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6476 for_each_rx_queue(bp, i)
6477 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6480 #ifdef BNX2X_STOP_ON_ERROR
6481 for_each_rx_queue(bp, i) {
6482 struct bnx2x_fastpath *fp = &bp->fp[i];
6484 fp->poll_no_work = 0;
6486 fp->poll_max_calls = 0;
6487 fp->poll_complete = 0;
6491 bnx2x_napi_enable(bp);
6493 if (bp->flags & USING_MSIX_FLAG) {
6494 rc = bnx2x_req_msix_irqs(bp);
6496 pci_disable_msix(bp->pdev);
6500 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6501 bnx2x_enable_msi(bp);
6503 rc = bnx2x_req_irq(bp);
6505 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6506 if (bp->flags & USING_MSI_FLAG)
6507 pci_disable_msi(bp->pdev);
6510 if (bp->flags & USING_MSI_FLAG) {
6511 bp->dev->irq = bp->pdev->irq;
6512 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6513 bp->dev->name, bp->pdev->irq);
6517 /* Send LOAD_REQUEST command to the MCP.
6518 The reply indicates the type of LOAD command:
6519 if this is the first port to be initialized,
6520 the common blocks should be initialized as well; otherwise not
6522 if (!BP_NOMCP(bp)) {
6523 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6525 BNX2X_ERR("MCP response failure, aborting\n");
6529 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6530 rc = -EBUSY; /* other port in diagnostic mode */
6535 int port = BP_PORT(bp);
6537 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6538 load_count[0], load_count[1], load_count[2]);
6540 load_count[1 + port]++;
6541 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6542 load_count[0], load_count[1], load_count[2]);
6543 if (load_count[0] == 1)
6544 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6545 else if (load_count[1 + port] == 1)
6546 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6548 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
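/* NO-MCP bookkeeping, e.g.: load_count[] is {common, port0, port1}.
 * The first function loaded on the device (load_count[0] == 1) does
 * the COMMON init, the first on a port (load_count[1 + port] == 1)
 * does the PORT init, and any later function only FUNCTION-level init.
 */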
6551 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6552 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6556 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6559 rc = bnx2x_init_hw(bp, load_code);
6561 BNX2X_ERR("HW init failed, aborting\n");
6565 /* Setup NIC internals and enable interrupts */
6566 bnx2x_nic_init(bp, load_code);
6568 /* Send LOAD_DONE command to MCP */
6569 if (!BP_NOMCP(bp)) {
6570 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6572 BNX2X_ERR("MCP response failure, aborting\n");
6578 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6580 rc = bnx2x_setup_leading(bp);
6582 BNX2X_ERR("Setup leading failed!\n");
6586 if (CHIP_IS_E1H(bp))
6587 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6588 BNX2X_ERR("!!! mf_cfg function disabled\n");
6589 bp->state = BNX2X_STATE_DISABLED;
6592 if (bp->state == BNX2X_STATE_OPEN)
6593 for_each_nondefault_queue(bp, i) {
6594 rc = bnx2x_setup_multi(bp, i);
6600 bnx2x_set_mac_addr_e1(bp, 1);
6602 bnx2x_set_mac_addr_e1h(bp, 1);
6605 bnx2x_initial_phy_init(bp);
6607 /* Start fast path */
6608 switch (load_mode) {
6610 /* Tx queues need only be re-enabled */
6611 netif_tx_wake_all_queues(bp->dev);
6612 /* Initialize the receive filter. */
6613 bnx2x_set_rx_mode(bp->dev);
6617 netif_tx_start_all_queues(bp->dev);
6618 /* Initialize the receive filter. */
6619 bnx2x_set_rx_mode(bp->dev);
6623 /* Initialize the receive filter. */
6624 bnx2x_set_rx_mode(bp->dev);
6625 bp->state = BNX2X_STATE_DIAG;
6633 bnx2x__link_status_update(bp);
6635 /* start the timer */
6636 mod_timer(&bp->timer, jiffies + bp->current_interval);
6642 bnx2x_int_disable_sync(bp, 1);
6643 if (!BP_NOMCP(bp)) {
6644 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6645 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6648 /* Free SKBs, SGEs, TPA pool and driver internals */
6649 bnx2x_free_skbs(bp);
6650 for_each_rx_queue(bp, i)
6651 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6656 bnx2x_napi_disable(bp);
6657 for_each_rx_queue(bp, i)
6658 netif_napi_del(&bnx2x_fp(bp, i, napi));
6661 /* TBD we really need to reset the chip
6662 if we want to recover from this */
6666 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6668 struct bnx2x_fastpath *fp = &bp->fp[index];
6671 /* halt the connection */
6672 fp->state = BNX2X_FP_STATE_HALTING;
6673 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
6675 /* Wait for completion */
6676 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6678 if (rc) /* timeout */
6681 /* delete cfc entry */
6682 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6684 /* Wait for completion */
6685 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6690 static int bnx2x_stop_leading(struct bnx2x *bp)
6692 u16 dsb_sp_prod_idx;
6693 /* if the other port is handling traffic,
6694 this can take a lot of time */
6700 /* Send HALT ramrod */
6701 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6702 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6704 /* Wait for completion */
6705 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6706 &(bp->fp[0].state), 1);
6707 if (rc) /* timeout */
6710 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6712 /* Send PORT_DELETE ramrod */
6713 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6715 /* Wait for completion to arrive on the default status block;
6716 we are going to reset the chip anyway,
6717 so there is not much to do if this times out
6719 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6721 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6722 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6723 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6724 #ifdef BNX2X_STOP_ON_ERROR
6733 rmb(); /* Refresh the dsb_sp_prod */
6735 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6736 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6741 static void bnx2x_reset_func(struct bnx2x *bp)
6743 int port = BP_PORT(bp);
6744 int func = BP_FUNC(bp);
6748 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6749 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6752 base = FUNC_ILT_BASE(func);
6753 for (i = base; i < base + ILT_PER_FUNC; i++)
6754 bnx2x_ilt_wr(bp, i, 0);
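/* The loop above clears this function's private ILT window -
 * ILT_PER_FUNC entries starting at FUNC_ILT_BASE(func) - so that
 * stale page translations cannot be reused after the reset.
 */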
6757 static void bnx2x_reset_port(struct bnx2x *bp)
6759 int port = BP_PORT(bp);
6762 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6764 /* Do not rcv packets to BRB */
6765 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6766 /* Do not direct rcv packets that are not for MCP to the BRB */
6767 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6768 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6771 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6774 /* Check for BRB port occupancy */
6775 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6777 DP(NETIF_MSG_IFDOWN,
6778 "BRB1 is not empty %d blocks are occupied\n", val);
6780 /* TODO: Close Doorbell port? */
6783 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6785 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6786 BP_FUNC(bp), reset_code);
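/* The reset scopes nest: UNLOAD_COMMON tears down the port, the
 * function and the common blocks; UNLOAD_PORT the port and function;
 * and UNLOAD_FUNCTION only the per-function state.
 */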
6788 switch (reset_code) {
6789 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6790 bnx2x_reset_port(bp);
6791 bnx2x_reset_func(bp);
6792 bnx2x_reset_common(bp);
6795 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6796 bnx2x_reset_port(bp);
6797 bnx2x_reset_func(bp);
6800 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6801 bnx2x_reset_func(bp);
6805 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6810 /* must be called with rtnl_lock */
6811 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6813 int port = BP_PORT(bp);
6817 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6819 bp->rx_mode = BNX2X_RX_MODE_NONE;
6820 bnx2x_set_storm_rx_mode(bp);
6822 bnx2x_netif_stop(bp, 1);
6824 del_timer_sync(&bp->timer);
6825 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6826 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6827 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6832 /* Wait until tx fastpath tasks complete */
6833 for_each_tx_queue(bp, i) {
6834 struct bnx2x_fastpath *fp = &bp->fp[i];
6838 while (bnx2x_has_tx_work_unload(fp)) {
6840 bnx2x_tx_int(fp, 1000);
6842 BNX2X_ERR("timeout waiting for queue[%d]\n",
6844 #ifdef BNX2X_STOP_ON_ERROR
6856 /* Give HW time to discard old tx messages */
6859 if (CHIP_IS_E1(bp)) {
6860 struct mac_configuration_cmd *config =
6861 bnx2x_sp(bp, mcast_config);
6863 bnx2x_set_mac_addr_e1(bp, 0);
6865 for (i = 0; i < config->hdr.length; i++)
6866 CAM_INVALIDATE(config->config_table[i]);
6868 config->hdr.length = i;
6869 if (CHIP_REV_IS_SLOW(bp))
6870 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6872 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6873 config->hdr.client_id = BP_CL_ID(bp);
6874 config->hdr.reserved1 = 0;
6876 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6877 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6878 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6881 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6883 bnx2x_set_mac_addr_e1h(bp, 0);
6885 for (i = 0; i < MC_HASH_SIZE; i++)
6886 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6889 if (unload_mode == UNLOAD_NORMAL)
6890 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6892 else if (bp->flags & NO_WOL_FLAG) {
6893 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6894 if (CHIP_IS_E1H(bp))
6895 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6897 } else if (bp->wol) {
6898 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6899 u8 *mac_addr = bp->dev->dev_addr;
6901 /* The mac address is written to entries 1-4 to
6902 preserve entry 0 which is used by the PMF */
6903 u8 entry = (BP_E1HVN(bp) + 1)*8;
6905 val = (mac_addr[0] << 8) | mac_addr[1];
6906 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6908 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6909 (mac_addr[4] << 8) | mac_addr[5];
6910 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
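/* Each EMAC MAC-match entry is a pair of 32-bit registers (8 bytes),
 * so entry = (vn + 1)*8 is the byte offset of entries 1-4; as written
 * above, the first register gets MAC bytes 0-1 and the second
 * bytes 2-5.
 */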
6912 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6915 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6917 /* Close multi and leading connections
6918 Completions for ramrods are collected in a synchronous way */
6919 for_each_nondefault_queue(bp, i)
6920 if (bnx2x_stop_multi(bp, i))
6923 rc = bnx2x_stop_leading(bp);
6925 BNX2X_ERR("Stop leading failed!\n");
6926 #ifdef BNX2X_STOP_ON_ERROR
6935 reset_code = bnx2x_fw_command(bp, reset_code);
6937 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6938 load_count[0], load_count[1], load_count[2]);
6940 load_count[1 + port]--;
6941 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6942 load_count[0], load_count[1], load_count[2]);
6943 if (load_count[0] == 0)
6944 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6945 else if (load_count[1 + port] == 0)
6946 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6948 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6951 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6952 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6953 bnx2x__link_reset(bp);
6955 /* Reset the chip */
6956 bnx2x_reset_chip(bp, reset_code);
6958 /* Report UNLOAD_DONE to MCP */
6960 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6963 /* Free SKBs, SGEs, TPA pool and driver internals */
6964 bnx2x_free_skbs(bp);
6965 for_each_rx_queue(bp, i)
6966 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6967 for_each_rx_queue(bp, i)
6968 netif_napi_del(&bnx2x_fp(bp, i, napi));
6971 bp->state = BNX2X_STATE_CLOSED;
6973 netif_carrier_off(bp->dev);
6978 static void bnx2x_reset_task(struct work_struct *work)
6980 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6982 #ifdef BNX2X_STOP_ON_ERROR
6983 BNX2X_ERR("reset task called but STOP_ON_ERROR is defined,"
6984 " so the reset was skipped to allow a debug dump;\n"
6985 KERN_ERR " you will need to reboot when done\n");
6991 if (!netif_running(bp->dev))
6992 goto reset_task_exit;
6994 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6995 bnx2x_nic_load(bp, LOAD_NORMAL);
7001 /* end of nic load/unload */
7005 /*
7006 * Init service functions
7007 */
7009 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7012 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7013 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7014 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7015 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7016 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7017 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7018 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7019 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7021 BNX2X_ERR("Unsupported function index: %d\n", func);
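/* The PXP2 "pretend" registers make GRC accesses issued by this PCI
 * function appear to come from another function;
 * bnx2x_undi_int_disable_e1h() below uses this to pose as function 0
 * while disabling the HC.
 */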
7026 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7028 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7030 /* Flush all outstanding writes */
7033 /* Pretend to be function 0 */
7035 /* Flush the GRC transaction (in the chip) */
7036 new_val = REG_RD(bp, reg);
7038 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7043 /* From now we are in the "like-E1" mode */
7044 bnx2x_int_disable(bp);
7046 /* Flush all outstanding writes */
7049 /* Restore the original function settings */
7050 REG_WR(bp, reg, orig_func);
7051 new_val = REG_RD(bp, reg);
7052 if (new_val != orig_func) {
7053 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7054 orig_func, new_val);
7059 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7061 if (CHIP_IS_E1H(bp))
7062 bnx2x_undi_int_disable_e1h(bp, func);
7064 bnx2x_int_disable(bp);
7067 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7071 /* Check if there is any driver already loaded */
7072 val = REG_RD(bp, MISC_REG_UNPREPARED);
7074 /* Check if it is the UNDI driver:
7075 * UNDI initializes the CID offset for the normal doorbell to 0x7
7077 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7078 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7080 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7082 int func = BP_FUNC(bp);
7086 /* clear the UNDI indication */
7087 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7089 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7091 /* try unload UNDI on port 0 */
7094 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7095 DRV_MSG_SEQ_NUMBER_MASK);
7096 reset_code = bnx2x_fw_command(bp, reset_code);
7098 /* if UNDI is loaded on the other port */
7099 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7101 /* send "DONE" for previous unload */
7102 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7104 /* unload UNDI on port 1 */
7107 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7108 DRV_MSG_SEQ_NUMBER_MASK);
7109 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7111 bnx2x_fw_command(bp, reset_code);
7114 /* now it's safe to release the lock */
7115 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7117 bnx2x_undi_int_disable(bp, func);
7119 /* close input traffic and wait for it */
7120 /* Do not rcv packets to BRB */
7121 REG_WR(bp,
7122 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7123 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7124 /* Do not direct rcv packets that are not for MCP to
7125 the BRB */
7126 REG_WR(bp,
7127 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7128 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7129 /* clear AEU */
7130 REG_WR(bp,
7131 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7132 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7135 /* save NIG port swap info */
7136 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7137 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7140 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7143 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7145 /* take the NIG out of reset and restore swap values */
7147 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7148 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7149 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7150 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7152 /* send unload done to the MCP */
7153 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7155 /* restore our func and fw_seq */
7158 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7159 DRV_MSG_SEQ_NUMBER_MASK);
7162 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7166 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7168 u32 val, val2, val3, val4, id;
7171 /* Get the chip revision id and number. */
7172 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7173 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7174 id = ((val & 0xffff) << 16);
7175 val = REG_RD(bp, MISC_REG_CHIP_REV);
7176 id |= ((val & 0xf) << 12);
7177 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7178 id |= ((val & 0xff) << 4);
7179 val = REG_RD(bp, MISC_REG_BOND_ID);
7181 bp->common.chip_id = id;
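/* Example (illustrative): if MISC_REG_CHIP_NUM reads 0x164e and the
 * rev, metal and bond fields read 1, 0 and 0, the composed chip_id is
 * 0x164e1000 - chip num in bits 16-31, rev in 12-15, metal in 4-11,
 * bond_id in 0-3.
 */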
7182 bp->link_params.chip_id = bp->common.chip_id;
7183 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7185 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7186 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7187 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7188 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7189 bp->common.flash_size, bp->common.flash_size);
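/* The size field selects a power-of-two flash size. Assuming the
 * usual NVRAM_1MB_SIZE of 0x20000 bytes (1 Mbit), a field value of
 * n gives 128 KB << n, e.g. n = 2 -> 512 KB.
 */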
7191 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7192 bp->link_params.shmem_base = bp->common.shmem_base;
7193 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7195 if (!bp->common.shmem_base ||
7196 (bp->common.shmem_base < 0xA0000) ||
7197 (bp->common.shmem_base >= 0xC0000)) {
7198 BNX2X_DEV_INFO("MCP not active\n");
7199 bp->flags |= NO_MCP_FLAG;
7203 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7204 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7205 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7206 BNX2X_ERR("BAD MCP validity signature\n");
7208 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7209 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7211 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
7212 bp->common.hw_config, bp->common.board);
7214 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7215 SHARED_HW_CFG_LED_MODE_MASK) >>
7216 SHARED_HW_CFG_LED_MODE_SHIFT);
7218 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7219 bp->common.bc_ver = val;
7220 BNX2X_DEV_INFO("bc_ver %X\n", val);
7221 if (val < BNX2X_BC_VER) {
7222 /* for now only warn
7223 * later we might need to enforce this */
7224 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7225 " please upgrade BC\n", BNX2X_BC_VER, val);
7228 if (BP_E1HVN(bp) == 0) {
7229 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7230 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7232 /* no WOL capability for E1HVN != 0 */
7233 bp->flags |= NO_WOL_FLAG;
7235 BNX2X_DEV_INFO("%sWoL capable\n",
7236 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7238 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7239 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7240 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7241 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7243 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7244 val, val2, val3, val4);
7247 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7250 int port = BP_PORT(bp);
7253 switch (switch_cfg) {
7255 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7258 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7259 switch (ext_phy_type) {
7260 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7261 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7264 bp->port.supported |= (SUPPORTED_10baseT_Half |
7265 SUPPORTED_10baseT_Full |
7266 SUPPORTED_100baseT_Half |
7267 SUPPORTED_100baseT_Full |
7268 SUPPORTED_1000baseT_Full |
7269 SUPPORTED_2500baseX_Full |
7274 SUPPORTED_Asym_Pause);
7277 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7278 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7281 bp->port.supported |= (SUPPORTED_10baseT_Half |
7282 SUPPORTED_10baseT_Full |
7283 SUPPORTED_100baseT_Half |
7284 SUPPORTED_100baseT_Full |
7285 SUPPORTED_1000baseT_Full |
7290 SUPPORTED_Asym_Pause);
7294 BNX2X_ERR("NVRAM config error. "
7295 "BAD SerDes ext_phy_config 0x%x\n",
7296 bp->link_params.ext_phy_config);
7300 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7302 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7305 case SWITCH_CFG_10G:
7306 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7309 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7310 switch (ext_phy_type) {
7311 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7312 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7315 bp->port.supported |= (SUPPORTED_10baseT_Half |
7316 SUPPORTED_10baseT_Full |
7317 SUPPORTED_100baseT_Half |
7318 SUPPORTED_100baseT_Full |
7319 SUPPORTED_1000baseT_Full |
7320 SUPPORTED_2500baseX_Full |
7321 SUPPORTED_10000baseT_Full |
7326 SUPPORTED_Asym_Pause);
7329 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7330 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7333 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7336 SUPPORTED_Asym_Pause);
7339 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7340 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7343 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7344 SUPPORTED_1000baseT_Full |
7347 SUPPORTED_Asym_Pause);
7350 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7351 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7354 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7355 SUPPORTED_1000baseT_Full |
7359 SUPPORTED_Asym_Pause);
7362 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7363 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7366 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7367 SUPPORTED_2500baseX_Full |
7368 SUPPORTED_1000baseT_Full |
7372 SUPPORTED_Asym_Pause);
7375 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7376 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7379 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7383 SUPPORTED_Asym_Pause);
7386 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7387 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7388 bp->link_params.ext_phy_config);
7392 BNX2X_ERR("NVRAM config error. "
7393 "BAD XGXS ext_phy_config 0x%x\n",
7394 bp->link_params.ext_phy_config);
7398 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7400 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7405 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7406 bp->port.link_config);
7409 bp->link_params.phy_addr = bp->port.phy_addr;
7411 /* mask what we support according to speed_cap_mask */
7412 if (!(bp->link_params.speed_cap_mask &
7413 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7414 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7416 if (!(bp->link_params.speed_cap_mask &
7417 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7418 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7420 if (!(bp->link_params.speed_cap_mask &
7421 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7422 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7424 if (!(bp->link_params.speed_cap_mask &
7425 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7426 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7428 if (!(bp->link_params.speed_cap_mask &
7429 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7430 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7431 SUPPORTED_1000baseT_Full);
7433 if (!(bp->link_params.speed_cap_mask &
7434 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7435 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7437 if (!(bp->link_params.speed_cap_mask &
7438 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7439 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7441 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7444 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7446 bp->link_params.req_duplex = DUPLEX_FULL;
7448 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7449 case PORT_FEATURE_LINK_SPEED_AUTO:
7450 if (bp->port.supported & SUPPORTED_Autoneg) {
7451 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7452 bp->port.advertising = bp->port.supported;
7455 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7457 if ((ext_phy_type ==
7458 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7460 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7461 /* force 10G, no AN */
7462 bp->link_params.req_line_speed = SPEED_10000;
7463 bp->port.advertising =
7464 (ADVERTISED_10000baseT_Full |
7468 BNX2X_ERR("NVRAM config error. "
7469 "Invalid link_config 0x%x"
7470 " Autoneg not supported\n",
7471 bp->port.link_config);
7476 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7477 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7478 bp->link_params.req_line_speed = SPEED_10;
7479 bp->port.advertising = (ADVERTISED_10baseT_Full |
7482 BNX2X_ERR("NVRAM config error. "
7483 "Invalid link_config 0x%x"
7484 " speed_cap_mask 0x%x\n",
7485 bp->port.link_config,
7486 bp->link_params.speed_cap_mask);
7491 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7492 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7493 bp->link_params.req_line_speed = SPEED_10;
7494 bp->link_params.req_duplex = DUPLEX_HALF;
7495 bp->port.advertising = (ADVERTISED_10baseT_Half |
7498 BNX2X_ERR("NVRAM config error. "
7499 "Invalid link_config 0x%x"
7500 " speed_cap_mask 0x%x\n",
7501 bp->port.link_config,
7502 bp->link_params.speed_cap_mask);
7507 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7508 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7509 bp->link_params.req_line_speed = SPEED_100;
7510 bp->port.advertising = (ADVERTISED_100baseT_Full |
7513 BNX2X_ERR("NVRAM config error. "
7514 "Invalid link_config 0x%x"
7515 " speed_cap_mask 0x%x\n",
7516 bp->port.link_config,
7517 bp->link_params.speed_cap_mask);
7522 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7523 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7524 bp->link_params.req_line_speed = SPEED_100;
7525 bp->link_params.req_duplex = DUPLEX_HALF;
7526 bp->port.advertising = (ADVERTISED_100baseT_Half |
7529 BNX2X_ERR("NVRAM config error. "
7530 "Invalid link_config 0x%x"
7531 " speed_cap_mask 0x%x\n",
7532 bp->port.link_config,
7533 bp->link_params.speed_cap_mask);
7538 case PORT_FEATURE_LINK_SPEED_1G:
7539 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7540 bp->link_params.req_line_speed = SPEED_1000;
7541 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7544 BNX2X_ERR("NVRAM config error. "
7545 "Invalid link_config 0x%x"
7546 " speed_cap_mask 0x%x\n",
7547 bp->port.link_config,
7548 bp->link_params.speed_cap_mask);
7553 case PORT_FEATURE_LINK_SPEED_2_5G:
7554 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7555 bp->link_params.req_line_speed = SPEED_2500;
7556 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7559 BNX2X_ERR("NVRAM config error. "
7560 "Invalid link_config 0x%x"
7561 " speed_cap_mask 0x%x\n",
7562 bp->port.link_config,
7563 bp->link_params.speed_cap_mask);
7568 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7569 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7570 case PORT_FEATURE_LINK_SPEED_10G_KR:
7571 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7572 bp->link_params.req_line_speed = SPEED_10000;
7573 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7576 BNX2X_ERR("NVRAM config error. "
7577 "Invalid link_config 0x%x"
7578 " speed_cap_mask 0x%x\n",
7579 bp->port.link_config,
7580 bp->link_params.speed_cap_mask);
7586 BNX2X_ERR("NVRAM config error. "
7587 "BAD link speed link_config 0x%x\n",
7588 bp->port.link_config);
7589 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7590 bp->port.advertising = bp->port.supported;
7594 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7595 PORT_FEATURE_FLOW_CONTROL_MASK);
7596 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7597 !(bp->port.supported & SUPPORTED_Autoneg))
7598 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7600 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7601 " advertising 0x%x\n",
7602 bp->link_params.req_line_speed,
7603 bp->link_params.req_duplex,
7604 bp->link_params.req_flow_ctrl, bp->port.advertising);
7607 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7609 int port = BP_PORT(bp);
7612 bp->link_params.bp = bp;
7613 bp->link_params.port = port;
7615 bp->link_params.serdes_config =
7616 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7617 bp->link_params.lane_config =
7618 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7619 bp->link_params.ext_phy_config =
7621 dev_info.port_hw_config[port].external_phy_config);
7622 bp->link_params.speed_cap_mask =
7624 dev_info.port_hw_config[port].speed_capability_mask);
7626 bp->port.link_config =
7627 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7629 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7630 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7631 " link_config 0x%08x\n",
7632 bp->link_params.serdes_config,
7633 bp->link_params.lane_config,
7634 bp->link_params.ext_phy_config,
7635 bp->link_params.speed_cap_mask, bp->port.link_config);
7637 bp->link_params.switch_cfg = (bp->port.link_config &
7638 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7639 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7641 bnx2x_link_settings_requested(bp);
7643 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7644 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
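/* mac_upper carries bytes 0-1 and mac_lower bytes 2-5 of the port
 * MAC: e.g. (illustrative) mac_upper = 0x0011 and mac_lower =
 * 0x22334455 unpack below to 00:11:22:33:44:55.
 */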
7645 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7646 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7647 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7648 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7649 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7650 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7651 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7652 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7655 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7657 int func = BP_FUNC(bp);
7661 bnx2x_get_common_hwinfo(bp);
7665 if (CHIP_IS_E1H(bp)) {
7667 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7669 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7670 FUNC_MF_CFG_E1HOV_TAG_MASK);
7671 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7675 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7677 func, bp->e1hov, bp->e1hov);
7679 BNX2X_DEV_INFO("Single function mode\n");
7681 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7682 " aborting\n", func);
7688 if (!BP_NOMCP(bp)) {
7689 bnx2x_get_port_hwinfo(bp);
7691 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7692 DRV_MSG_SEQ_NUMBER_MASK);
7693 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7697 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7698 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7699 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7700 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7701 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7702 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7703 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7704 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7705 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7706 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7707 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7709 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7717 /* only supposed to happen on emulation/FPGA */
7718 BNX2X_ERR("warning random MAC workaround active\n");
7719 random_ether_addr(bp->dev->dev_addr);
7720 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7726 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7728 int func = BP_FUNC(bp);
7731 /* Disable interrupt handling until HW is initialized */
7732 atomic_set(&bp->intr_sem, 1);
7734 mutex_init(&bp->port.phy_mutex);
7736 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7737 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7739 rc = bnx2x_get_hwinfo(bp);
7741 /* need to reset chip if undi was active */
7743 bnx2x_undi_unload(bp);
7745 if (CHIP_REV_IS_FPGA(bp))
7746 printk(KERN_ERR PFX "FPGA detected\n");
7748 if (BP_NOMCP(bp) && (func == 0))
7750 "MCP disabled, must load devices in order!\n");
7752 /* Set multi queue mode */
7753 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
7754 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
7756 "Multi disabled since int_mode requested is not MSI-X\n");
7757 multi_mode = ETH_RSS_MODE_DISABLED;
7759 bp->multi_mode = multi_mode;
7764 bp->flags &= ~TPA_ENABLE_FLAG;
7765 bp->dev->features &= ~NETIF_F_LRO;
7767 bp->flags |= TPA_ENABLE_FLAG;
7768 bp->dev->features |= NETIF_F_LRO;
7772 bp->tx_ring_size = MAX_TX_AVAIL;
7773 bp->rx_ring_size = MAX_RX_AVAIL;
7781 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7782 bp->current_interval = (poll ? poll : bp->timer_interval);
7784 init_timer(&bp->timer);
7785 bp->timer.expires = jiffies + bp->current_interval;
7786 bp->timer.data = (unsigned long) bp;
7787 bp->timer.function = bnx2x_timer;
7792 /*
7793 * ethtool service functions
7794 */
7796 /* All ethtool functions called with rtnl_lock */
7798 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7800 struct bnx2x *bp = netdev_priv(dev);
7802 cmd->supported = bp->port.supported;
7803 cmd->advertising = bp->port.advertising;
7805 if (netif_carrier_ok(dev)) {
7806 cmd->speed = bp->link_vars.line_speed;
7807 cmd->duplex = bp->link_vars.duplex;
7809 cmd->speed = bp->link_params.req_line_speed;
7810 cmd->duplex = bp->link_params.req_duplex;
7815 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7816 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7817 if (vn_max_rate < cmd->speed)
7818 cmd->speed = vn_max_rate;
7821 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7823 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7825 switch (ext_phy_type) {
7826 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7827 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7828 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7829 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7830 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7831 cmd->port = PORT_FIBRE;
7834 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7835 cmd->port = PORT_TP;
7838 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7839 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7840 bp->link_params.ext_phy_config);
7844 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7845 bp->link_params.ext_phy_config);
7849 cmd->port = PORT_TP;
7851 cmd->phy_address = bp->port.phy_addr;
7852 cmd->transceiver = XCVR_INTERNAL;
7854 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7855 cmd->autoneg = AUTONEG_ENABLE;
7857 cmd->autoneg = AUTONEG_DISABLE;
7862 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7863 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7864 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7865 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7866 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7867 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7868 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7873 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7875 struct bnx2x *bp = netdev_priv(dev);
7881 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7882 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7883 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7884 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7885 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7886 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7887 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7889 if (cmd->autoneg == AUTONEG_ENABLE) {
7890 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7891 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7895 /* advertise the requested speed and duplex if supported */
7896 cmd->advertising &= bp->port.supported;
7898 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7899 bp->link_params.req_duplex = DUPLEX_FULL;
7900 bp->port.advertising |= (ADVERTISED_Autoneg |
7903 } else { /* forced speed */
7904 /* advertise the requested speed and duplex if supported */
7905 switch (cmd->speed) {
7907 if (cmd->duplex == DUPLEX_FULL) {
7908 if (!(bp->port.supported &
7909 SUPPORTED_10baseT_Full)) {
7911 "10M full not supported\n");
7915 advertising = (ADVERTISED_10baseT_Full |
7918 if (!(bp->port.supported &
7919 SUPPORTED_10baseT_Half)) {
7921 "10M half not supported\n");
7925 advertising = (ADVERTISED_10baseT_Half |
7931 if (cmd->duplex == DUPLEX_FULL) {
7932 if (!(bp->port.supported &
7933 SUPPORTED_100baseT_Full)) {
7935 "100M full not supported\n");
7939 advertising = (ADVERTISED_100baseT_Full |
7942 if (!(bp->port.supported &
7943 SUPPORTED_100baseT_Half)) {
7945 "100M half not supported\n");
7949 advertising = (ADVERTISED_100baseT_Half |
7955 if (cmd->duplex != DUPLEX_FULL) {
7956 DP(NETIF_MSG_LINK, "1G half not supported\n");
7960 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7961 DP(NETIF_MSG_LINK, "1G full not supported\n");
7965 advertising = (ADVERTISED_1000baseT_Full |
7970 if (cmd->duplex != DUPLEX_FULL) {
7972 "2.5G half not supported\n");
7976 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7978 "2.5G full not supported\n");
7982 advertising = (ADVERTISED_2500baseX_Full |
7987 if (cmd->duplex != DUPLEX_FULL) {
7988 DP(NETIF_MSG_LINK, "10G half not supported\n");
7992 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7993 DP(NETIF_MSG_LINK, "10G full not supported\n");
7997 advertising = (ADVERTISED_10000baseT_Full |
8002 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8006 bp->link_params.req_line_speed = cmd->speed;
8007 bp->link_params.req_duplex = cmd->duplex;
8008 bp->port.advertising = advertising;
8011 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8012 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8013 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8014 bp->port.advertising);
8016 if (netif_running(dev)) {
8017 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8024 #define PHY_FW_VER_LEN 10
8026 static void bnx2x_get_drvinfo(struct net_device *dev,
8027 struct ethtool_drvinfo *info)
8029 struct bnx2x *bp = netdev_priv(dev);
8030 u8 phy_fw_ver[PHY_FW_VER_LEN];
8032 strcpy(info->driver, DRV_MODULE_NAME);
8033 strcpy(info->version, DRV_MODULE_VERSION);
8035 phy_fw_ver[0] = '\0';
8037 bnx2x_acquire_phy_lock(bp);
8038 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8039 (bp->state != BNX2X_STATE_CLOSED),
8040 phy_fw_ver, PHY_FW_VER_LEN);
8041 bnx2x_release_phy_lock(bp);
8044 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8045 (bp->common.bc_ver & 0xff0000) >> 16,
8046 (bp->common.bc_ver & 0xff00) >> 8,
8047 (bp->common.bc_ver & 0xff),
8048 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8049 strcpy(info->bus_info, pci_name(bp->pdev));
8050 info->n_stats = BNX2X_NUM_STATS;
8051 info->testinfo_len = BNX2X_NUM_TESTS;
8052 info->eedump_len = bp->common.flash_size;
8053 info->regdump_len = 0;
8056 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8058 struct bnx2x *bp = netdev_priv(dev);
8060 if (bp->flags & NO_WOL_FLAG) {
8064 wol->supported = WAKE_MAGIC;
8066 wol->wolopts = WAKE_MAGIC;
8070 memset(&wol->sopass, 0, sizeof(wol->sopass));
8073 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8075 struct bnx2x *bp = netdev_priv(dev);
8077 if (wol->wolopts & ~WAKE_MAGIC)
8080 if (wol->wolopts & WAKE_MAGIC) {
8081 if (bp->flags & NO_WOL_FLAG)
8091 static u32 bnx2x_get_msglevel(struct net_device *dev)
8093 struct bnx2x *bp = netdev_priv(dev);
8095 return bp->msglevel;
8098 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8100 struct bnx2x *bp = netdev_priv(dev);
8102 if (capable(CAP_NET_ADMIN))
8103 bp->msglevel = level;
8106 static int bnx2x_nway_reset(struct net_device *dev)
8108 struct bnx2x *bp = netdev_priv(dev);
8113 if (netif_running(dev)) {
8114 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8121 static int bnx2x_get_eeprom_len(struct net_device *dev)
8123 struct bnx2x *bp = netdev_priv(dev);
8125 return bp->common.flash_size;
8128 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8130 int port = BP_PORT(bp);
8134 /* adjust timeout for emulation/FPGA */
8135 count = NVRAM_TIMEOUT_COUNT;
8136 if (CHIP_REV_IS_SLOW(bp))
8139 /* request access to nvram interface */
8140 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8141 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8143 for (i = 0; i < count*10; i++) {
8144 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8145 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8151 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8152 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8159 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8161 int port = BP_PORT(bp);
8165 /* adjust timeout for emulation/FPGA */
8166 count = NVRAM_TIMEOUT_COUNT;
8167 if (CHIP_REV_IS_SLOW(bp))
8170 /* relinquish nvram interface */
8171 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8172 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8174 for (i = 0; i < count*10; i++) {
8175 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8176 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8182 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8183 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8190 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8194 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8196 /* enable both bits, even on read */
8197 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8198 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8199 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8202 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8206 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8208 /* disable both bits, even after read */
8209 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8210 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8211 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8214 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8220 /* build the command word */
8221 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8223 /* need to clear DONE bit separately */
8224 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8226 /* address of the NVRAM to read from */
8227 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8228 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8230 /* issue a read command */
8231 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8233 /* adjust timeout for emulation/FPGA */
8234 count = NVRAM_TIMEOUT_COUNT;
8235 if (CHIP_REV_IS_SLOW(bp))
8238 /* wait for completion */
8241 for (i = 0; i < count; i++) {
8243 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8245 if (val & MCPR_NVM_COMMAND_DONE) {
8246 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8247 /* we read nvram data in cpu order,
8248 * but ethtool sees it as an array of bytes;
8249 * converting to big-endian does the work */
8250 val = cpu_to_be32(val);
8260 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8267 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8269 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8274 if (offset + buf_size > bp->common.flash_size) {
8275 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8276 " buf_size (0x%x) > flash_size (0x%x)\n",
8277 offset, buf_size, bp->common.flash_size);
8281 /* request access to nvram interface */
8282 rc = bnx2x_acquire_nvram_lock(bp);
8286 /* enable access to nvram interface */
8287 bnx2x_enable_nvram_access(bp);
8289 /* read the first word(s) */
8290 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8291 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8292 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8293 memcpy(ret_buf, &val, 4);
8295 /* advance to the next dword */
8296 offset += sizeof(u32);
8297 ret_buf += sizeof(u32);
8298 buf_size -= sizeof(u32);
8303 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8304 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8305 memcpy(ret_buf, &val, 4);
8308 /* disable access to nvram interface */
8309 bnx2x_disable_nvram_access(bp);
8310 bnx2x_release_nvram_lock(bp);
8315 static int bnx2x_get_eeprom(struct net_device *dev,
8316 struct ethtool_eeprom *eeprom, u8 *eebuf)
8318 struct bnx2x *bp = netdev_priv(dev);
8321 if (!netif_running(dev))
8324 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8325 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8326 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8327 eeprom->len, eeprom->len);
8329 /* parameters already validated in ethtool_get_eeprom */
8331 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8336 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8341 /* build the command word */
8342 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8344 /* need to clear DONE bit separately */
8345 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8347 /* write the data */
8348 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8350 /* address of the NVRAM to write to */
8351 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8352 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8354 /* issue the write command */
8355 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8357 /* adjust timeout for emulation/FPGA */
8358 count = NVRAM_TIMEOUT_COUNT;
8359 if (CHIP_REV_IS_SLOW(bp))
8362 /* wait for completion */
8364 for (i = 0; i < count; i++) {
8366 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8367 if (val & MCPR_NVM_COMMAND_DONE) {
8376 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
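/* BYTE_OFFSET() converts the byte lane of an unaligned offset into a
 * bit shift: e.g. offset 0x102 -> lane 2 -> shift 16, so the
 * read-modify-write in bnx2x_nvram_write1() patches only bits 23:16
 * of the containing dword.
 */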
8378 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8386 if (offset + buf_size > bp->common.flash_size) {
8387 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8388 " buf_size (0x%x) > flash_size (0x%x)\n",
8389 offset, buf_size, bp->common.flash_size);
8393 /* request access to nvram interface */
8394 rc = bnx2x_acquire_nvram_lock(bp);
8398 /* enable access to nvram interface */
8399 bnx2x_enable_nvram_access(bp);
8401 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8402 align_offset = (offset & ~0x03);
8403 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8406 val &= ~(0xff << BYTE_OFFSET(offset));
8407 val |= (*data_buf << BYTE_OFFSET(offset));
8409 /* nvram data is returned as an array of bytes
8410 * convert it back to cpu order */
8411 val = be32_to_cpu(val);
8413 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8417 /* disable access to nvram interface */
8418 bnx2x_disable_nvram_access(bp);
8419 bnx2x_release_nvram_lock(bp);
8424 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8432 if (buf_size == 1) /* ethtool */
8433 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8435 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8437 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8442 if (offset + buf_size > bp->common.flash_size) {
8443 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8444 " buf_size (0x%x) > flash_size (0x%x)\n",
8445 offset, buf_size, bp->common.flash_size);
8449 /* request access to nvram interface */
8450 rc = bnx2x_acquire_nvram_lock(bp);
8454 /* enable access to nvram interface */
8455 bnx2x_enable_nvram_access(bp);
8458 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8459 while ((written_so_far < buf_size) && (rc == 0)) {
8460 if (written_so_far == (buf_size - sizeof(u32)))
8461 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8462 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8463 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8464 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8465 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
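/* FIRST/LAST framing sketch (assuming NVRAM_PAGE_SIZE is 256 bytes):
 * LAST is set on the final dword of the buffer and on the last dword
 * of each page, and FIRST again on the first dword of the next page,
 * so every page is programmed as its own framed burst.
 */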
8467 memcpy(&val, data_buf, 4);
8469 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8471 /* advance to the next dword */
8472 offset += sizeof(u32);
8473 data_buf += sizeof(u32);
8474 written_so_far += sizeof(u32);
8478 /* disable access to nvram interface */
8479 bnx2x_disable_nvram_access(bp);
8480 bnx2x_release_nvram_lock(bp);
8485 static int bnx2x_set_eeprom(struct net_device *dev,
8486 struct ethtool_eeprom *eeprom, u8 *eebuf)
8488 struct bnx2x *bp = netdev_priv(dev);
8491 if (!netif_running(dev))
8494 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8495 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8496 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8497 eeprom->len, eeprom->len);
8499 /* parameters already validated in ethtool_set_eeprom */
8501 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8502 if (eeprom->magic == 0x00504859)
8505 bnx2x_acquire_phy_lock(bp);
8506 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8507 bp->link_params.ext_phy_config,
8508 (bp->state != BNX2X_STATE_CLOSED),
8509 eebuf, eeprom->len);
8510 if ((bp->state == BNX2X_STATE_OPEN) ||
8511 (bp->state == BNX2X_STATE_DISABLED)) {
8512 rc |= bnx2x_link_reset(&bp->link_params,
8514 rc |= bnx2x_phy_init(&bp->link_params,
8517 bnx2x_release_phy_lock(bp);
8519 } else /* Only the PMF can access the PHY */
8522 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8527 static int bnx2x_get_coalesce(struct net_device *dev,
8528 struct ethtool_coalesce *coal)
8530 struct bnx2x *bp = netdev_priv(dev);
8532 memset(coal, 0, sizeof(struct ethtool_coalesce));
8534 coal->rx_coalesce_usecs = bp->rx_ticks;
8535 coal->tx_coalesce_usecs = bp->tx_ticks;
8540 static int bnx2x_set_coalesce(struct net_device *dev,
8541 struct ethtool_coalesce *coal)
8543 struct bnx2x *bp = netdev_priv(dev);
8545 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8546 if (bp->rx_ticks > 3000)
8547 bp->rx_ticks = 3000;
8549 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8550 if (bp->tx_ticks > 0x3000)
8551 bp->tx_ticks = 0x3000;
8553 if (netif_running(dev))
8554 bnx2x_update_coalesce(bp);
8559 static void bnx2x_get_ringparam(struct net_device *dev,
8560 struct ethtool_ringparam *ering)
8562 struct bnx2x *bp = netdev_priv(dev);
8564 ering->rx_max_pending = MAX_RX_AVAIL;
8565 ering->rx_mini_max_pending = 0;
8566 ering->rx_jumbo_max_pending = 0;
8568 ering->rx_pending = bp->rx_ring_size;
8569 ering->rx_mini_pending = 0;
8570 ering->rx_jumbo_pending = 0;
8572 ering->tx_max_pending = MAX_TX_AVAIL;
8573 ering->tx_pending = bp->tx_ring_size;
8576 static int bnx2x_set_ringparam(struct net_device *dev,
8577 struct ethtool_ringparam *ering)
8579 struct bnx2x *bp = netdev_priv(dev);
8582 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8583 (ering->tx_pending > MAX_TX_AVAIL) ||
8584 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8587 bp->rx_ring_size = ering->rx_pending;
8588 bp->tx_ring_size = ering->tx_pending;
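/* The check above keeps tx_pending large enough for one maximally
 * fragmented skb plus spare BDs, hence the MAX_SKB_FRAGS + 4 floor
 * (e.g. with MAX_SKB_FRAGS = 18 the smallest accepted value is 23).
 */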
8590 if (netif_running(dev)) {
8591 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8592 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8598 static void bnx2x_get_pauseparam(struct net_device *dev,
8599 struct ethtool_pauseparam *epause)
8601 struct bnx2x *bp = netdev_priv(dev);
8603 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8604 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8606 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8607 BNX2X_FLOW_CTRL_RX);
8608 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8609 BNX2X_FLOW_CTRL_TX);
8611 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8612 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8613 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8616 static int bnx2x_set_pauseparam(struct net_device *dev,
8617 struct ethtool_pauseparam *epause)
8619 struct bnx2x *bp = netdev_priv(dev);
8624 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8625 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8626 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8628 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8630 if (epause->rx_pause)
8631 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8633 if (epause->tx_pause)
8634 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8636 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8637 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8639 if (epause->autoneg) {
8640 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8641 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8645 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8646 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8650 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8652 if (netif_running(dev)) {
8653 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8660 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8662 struct bnx2x *bp = netdev_priv(dev);
8666 /* TPA requires Rx CSUM offloading */
8667 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8668 if (!(dev->features & NETIF_F_LRO)) {
8669 dev->features |= NETIF_F_LRO;
8670 bp->flags |= TPA_ENABLE_FLAG;
8674 } else if (dev->features & NETIF_F_LRO) {
8675 dev->features &= ~NETIF_F_LRO;
8676 bp->flags &= ~TPA_ENABLE_FLAG;
8680 if (changed && netif_running(dev)) {
8681 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8682 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8688 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8690 struct bnx2x *bp = netdev_priv(dev);
8695 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8697 struct bnx2x *bp = netdev_priv(dev);
8702 /* Disable TPA when Rx CSUM is disabled, since otherwise all
8703 TPA'ed packets would be discarded due to a wrong TCP CSUM */
8705 u32 flags = ethtool_op_get_flags(dev);
8707 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8713 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8716 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8717 dev->features |= NETIF_F_TSO6;
8719 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8720 dev->features &= ~NETIF_F_TSO6;
8726 static const struct {
8727 char string[ETH_GSTRING_LEN];
8728 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8729 { "register_test (offline)" },
8730 { "memory_test (offline)" },
8731 { "loopback_test (offline)" },
8732 { "nvram_test (online)" },
8733 { "interrupt_test (online)" },
8734 { "link_test (online)" },
8735 { "idle check (online)" },
8736 { "MC errors (online)" }
8739 static int bnx2x_self_test_count(struct net_device *dev)
8741 return BNX2X_NUM_TESTS;
8744 static int bnx2x_test_registers(struct bnx2x *bp)
8746 int idx, i, rc = -ENODEV;
8748 int port = BP_PORT(bp);
8749 static const struct {
8754 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8755 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8756 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8757 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8758 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8759 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8760 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8761 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8762 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8763 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8764 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8765 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8766 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8767 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8768 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8769 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8770 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8771 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8772 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8773 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8774 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8775 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8776 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8777 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8778 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8779 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8780 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8781 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8782 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8783 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8784 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8785 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8786 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8787 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8788 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8789 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8790 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8791 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8793 { 0xffffffff, 0, 0x00000000 }
8796 if (!netif_running(bp->dev))
8799 /* Repeat the test twice:
	   first by writing 0x00000000, then by writing 0xffffffff */
8801 for (idx = 0; idx < 2; idx++) {
8808 wr_val = 0xffffffff;
8812 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8813 u32 offset, mask, save_val, val;
8815 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8816 mask = reg_tbl[i].mask;
8818 save_val = REG_RD(bp, offset);
8820 REG_WR(bp, offset, wr_val);
8821 val = REG_RD(bp, offset);
8823 /* Restore the original register's value */
8824 REG_WR(bp, offset, save_val);
			/* verify the value is as expected */
8827 if ((val & mask) != (wr_val & mask))
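#if 0
/* Illustrative sketch (not built): the generic pattern behind the
 * table-driven register test above.  Only bits covered by 'mask' are
 * assumed to be readable/writable; read-only and reserved bits are
 * excluded from the comparison.  reg_rd()/reg_wr() stand in for the
 * driver's REG_RD/REG_WR accessors.
 */
static int demo_reg_test_one(u32 offset, u32 mask, u32 wr_val,
			     u32 (*reg_rd)(u32), void (*reg_wr)(u32, u32))
{
	u32 save_val = reg_rd(offset);
	u32 val;

	reg_wr(offset, wr_val);
	val = reg_rd(offset);
	reg_wr(offset, save_val);	/* always restore the register */

	return ((val & mask) == (wr_val & mask)) ? 0 : -1;
}
#endif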
8838 static int bnx2x_test_memory(struct bnx2x *bp)
8840 int i, j, rc = -ENODEV;
8842 static const struct {
8846 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8847 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8848 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8849 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8850 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8851 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8852 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8856 static const struct {
8862 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8863 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8864 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8865 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8866 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8867 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8869 { NULL, 0xffffffff, 0, 0 }
8872 if (!netif_running(bp->dev))
8875 /* Go through all the memories */
8876 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8877 for (j = 0; j < mem_tbl[i].size; j++)
8878 REG_RD(bp, mem_tbl[i].offset + j*4);
8880 /* Check the parity status */
8881 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8882 val = REG_RD(bp, prty_tbl[i].offset);
8883 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8884 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8886 "%s is 0x%x\n", prty_tbl[i].name, val);
8897 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8902 while (bnx2x_link_test(bp) && cnt--)
8906 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8908 unsigned int pkt_size, num_pkts, i;
8909 struct sk_buff *skb;
8910 unsigned char *packet;
8911 struct bnx2x_fastpath *fp = &bp->fp[0];
8912 u16 tx_start_idx, tx_idx;
8913 u16 rx_start_idx, rx_idx;
8915 struct sw_tx_bd *tx_buf;
8916 struct eth_tx_bd *tx_bd;
8918 union eth_rx_cqe *cqe;
8920 struct sw_rx_bd *rx_buf;
8924 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8925 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8926 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8928 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8930 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8931 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8932 /* wait until link state is restored */
8934 while (cnt-- && bnx2x_test_link(&bp->link_params,
8941 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8944 goto test_loopback_exit;
8946 packet = skb_put(skb, pkt_size);
8947 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8948 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8949 for (i = ETH_HLEN; i < pkt_size; i++)
8950 packet[i] = (unsigned char) (i & 0xff);
8953 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8954 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8956 pkt_prod = fp->tx_pkt_prod++;
8957 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8958 tx_buf->first_bd = fp->tx_bd_prod;
8961 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8962 mapping = pci_map_single(bp->pdev, skb->data,
8963 skb_headlen(skb), PCI_DMA_TODEVICE);
8964 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8965 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8966 tx_bd->nbd = cpu_to_le16(1);
8967 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8968 tx_bd->vlan = cpu_to_le16(pkt_prod);
8969 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8970 ETH_TX_BD_FLAGS_END_BD);
8971 tx_bd->general_data = ((UNICAST_ADDRESS <<
8972 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8976 fp->hw_tx_prods->bds_prod =
8977 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8978 mb(); /* FW restriction: must not reorder writing nbd and packets */
8979 fp->hw_tx_prods->packets_prod =
8980 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8981 DOORBELL(bp, FP_IDX(fp), 0);
8987 bp->dev->trans_start = jiffies;
8991 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8992 if (tx_idx != tx_start_idx + num_pkts)
8993 goto test_loopback_exit;
8995 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8996 if (rx_idx != rx_start_idx + num_pkts)
8997 goto test_loopback_exit;
8999 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9000 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9001 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9002 goto test_loopback_rx_exit;
9004 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9005 if (len != pkt_size)
9006 goto test_loopback_rx_exit;
9008 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9010 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9011 for (i = ETH_HLEN; i < pkt_size; i++)
9012 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9013 goto test_loopback_rx_exit;
9017 test_loopback_rx_exit:
9019 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9020 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9021 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9022 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9024 /* Update producers */
9025 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9029 bp->link_params.loopback_mode = LOOPBACK_NONE;
9034 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9038 if (!netif_running(bp->dev))
9039 return BNX2X_LOOPBACK_FAILED;
9041 bnx2x_netif_stop(bp, 1);
9042 bnx2x_acquire_phy_lock(bp);
9044 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
9045 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
9046 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9049 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
9050 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
9051 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9054 bnx2x_release_phy_lock(bp);
9055 bnx2x_netif_start(bp);
9060 #define CRC32_RESIDUAL 0xdebb20e3
9062 static int bnx2x_test_nvram(struct bnx2x *bp)
9064 static const struct {
9068 { 0, 0x14 }, /* bootstrap */
9069 { 0x14, 0xec }, /* dir */
9070 { 0x100, 0x350 }, /* manuf_info */
9071 { 0x450, 0xf0 }, /* feature_info */
9072 { 0x640, 0x64 }, /* upgrade_key_info */
9074 { 0x708, 0x70 }, /* manuf_key_info */
9079 u8 *data = (u8 *)buf;
9083 rc = bnx2x_nvram_read(bp, 0, data, 4);
9085 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9086 goto test_nvram_exit;
9089 magic = be32_to_cpu(buf[0]);
9090 if (magic != 0x669955aa) {
9091 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9093 goto test_nvram_exit;
9096 for (i = 0; nvram_tbl[i].size; i++) {
9098 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9102 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9103 goto test_nvram_exit;
9106 csum = ether_crc_le(nvram_tbl[i].size, data);
9107 if (csum != CRC32_RESIDUAL) {
9109 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9111 goto test_nvram_exit;
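#if 0
/* Illustrative sketch (not built): why every valid NVRAM region is
 * checked against the single constant CRC32_RESIDUAL.  If a region
 * ends with the (final-XORed, little-endian) CRC-32 of the bytes that
 * precede it, then running the CRC over the whole region - data plus
 * appended CRC - always leaves the register at 0xdebb20e3.  The plain
 * bit-wise CRC below mirrors ether_crc_le() (init ~0, reflected
 * polynomial 0xedb88320, no final inversion).
 */
static u32 demo_crc32_le(const u8 *data, unsigned int len)
{
	u32 crc = ~0U;
	unsigned int i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;	/* == CRC32_RESIDUAL over data||CRC32(data) */
}
#endif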
9119 static int bnx2x_test_intr(struct bnx2x *bp)
9121 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9124 if (!netif_running(bp->dev))
9127 config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
9132 config->hdr.client_id = BP_CL_ID(bp);
9133 config->hdr.reserved1 = 0;
9135 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9136 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9137 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9139 bp->set_mac_pending++;
9140 for (i = 0; i < 10; i++) {
9141 if (!bp->set_mac_pending)
9143 msleep_interruptible(10);
9152 static void bnx2x_self_test(struct net_device *dev,
9153 struct ethtool_test *etest, u64 *buf)
9155 struct bnx2x *bp = netdev_priv(dev);
9157 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9159 if (!netif_running(dev))
9162 /* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;
9166 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9169 link_up = bp->link_vars.link_up;
9170 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9171 bnx2x_nic_load(bp, LOAD_DIAG);
9172 /* wait until link state is restored */
9173 bnx2x_wait_for_link(bp, link_up);
9175 if (bnx2x_test_registers(bp) != 0) {
9177 etest->flags |= ETH_TEST_FL_FAILED;
9179 if (bnx2x_test_memory(bp) != 0) {
9181 etest->flags |= ETH_TEST_FL_FAILED;
9183 buf[2] = bnx2x_test_loopback(bp, link_up);
9185 etest->flags |= ETH_TEST_FL_FAILED;
9187 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9188 bnx2x_nic_load(bp, LOAD_NORMAL);
9189 /* wait until link state is restored */
9190 bnx2x_wait_for_link(bp, link_up);
9192 if (bnx2x_test_nvram(bp) != 0) {
9194 etest->flags |= ETH_TEST_FL_FAILED;
9196 if (bnx2x_test_intr(bp) != 0) {
9198 etest->flags |= ETH_TEST_FL_FAILED;
9201 if (bnx2x_link_test(bp) != 0) {
9203 etest->flags |= ETH_TEST_FL_FAILED;
9205 buf[7] = bnx2x_mc_assert(bp);
9207 etest->flags |= ETH_TEST_FL_FAILED;
9209 #ifdef BNX2X_EXTRA_DEBUG
9210 bnx2x_panic_dump(bp);
9214 static const struct {
9218 #define STATS_FLAGS_PORT 1
9219 #define STATS_FLAGS_FUNC 2
9220 u8 string[ETH_GSTRING_LEN];
9221 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9222 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9223 8, STATS_FLAGS_FUNC, "rx_bytes" },
9224 { STATS_OFFSET32(error_bytes_received_hi),
9225 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9226 { STATS_OFFSET32(total_bytes_transmitted_hi),
9227 8, STATS_FLAGS_FUNC, "tx_bytes" },
9228 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9229 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9230 { STATS_OFFSET32(total_unicast_packets_received_hi),
9231 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9232 { STATS_OFFSET32(total_multicast_packets_received_hi),
9233 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9234 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9235 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9236 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9237 8, STATS_FLAGS_FUNC, "tx_packets" },
9238 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9239 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9240 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9241 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9242 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9243 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9244 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9245 8, STATS_FLAGS_PORT, "rx_align_errors" },
9246 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9247 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9248 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9249 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9250 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9251 8, STATS_FLAGS_PORT, "tx_deferred" },
9252 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9253 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9254 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9255 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9256 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9257 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9258 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9259 8, STATS_FLAGS_PORT, "rx_fragments" },
9260 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9261 8, STATS_FLAGS_PORT, "rx_jabbers" },
9262 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9263 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9264 { STATS_OFFSET32(jabber_packets_received),
9265 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9266 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9267 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9268 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9269 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9270 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9271 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9272 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9273 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9274 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9275 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9276 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9277 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9278 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9279 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9280 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9281 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9282 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9283 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9284 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9285 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9286 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9287 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9288 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9289 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9290 { STATS_OFFSET32(mac_filter_discard),
9291 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9292 { STATS_OFFSET32(no_buff_discard),
9293 4, STATS_FLAGS_FUNC, "rx_discards" },
9294 { STATS_OFFSET32(xxoverflow_discard),
9295 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9296 { STATS_OFFSET32(brb_drop_hi),
9297 8, STATS_FLAGS_PORT, "brb_discard" },
9298 { STATS_OFFSET32(brb_truncate_hi),
9299 8, STATS_FLAGS_PORT, "brb_truncate" },
9300 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9301 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9302 { STATS_OFFSET32(rx_skb_alloc_failed),
9303 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9304 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9305 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9308 #define IS_NOT_E1HMF_STAT(bp, i) \
9309 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9311 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9313 struct bnx2x *bp = netdev_priv(dev);
9316 switch (stringset) {
9318 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9319 if (IS_NOT_E1HMF_STAT(bp, i))
9321 strcpy(buf + j*ETH_GSTRING_LEN,
9322 bnx2x_stats_arr[i].string);
9328 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9333 static int bnx2x_get_stats_count(struct net_device *dev)
9335 struct bnx2x *bp = netdev_priv(dev);
9336 int i, num_stats = 0;
9338 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9339 if (IS_NOT_E1HMF_STAT(bp, i))
9346 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9347 struct ethtool_stats *stats, u64 *buf)
9349 struct bnx2x *bp = netdev_priv(dev);
9350 u32 *hw_stats = (u32 *)&bp->eth_stats;
9353 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9354 if (IS_NOT_E1HMF_STAT(bp, i))
9357 if (bnx2x_stats_arr[i].size == 0) {
9358 /* skip this counter */
9363 if (bnx2x_stats_arr[i].size == 4) {
9364 /* 4-byte counter */
9365 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9369 /* 8-byte counter */
9370 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9371 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9376 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9378 struct bnx2x *bp = netdev_priv(dev);
9379 int port = BP_PORT(bp);
9382 if (!netif_running(dev))
9391 for (i = 0; i < (data * 2); i++) {
9393 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9394 bp->link_params.hw_led_mode,
9395 bp->link_params.chip_id);
9397 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9398 bp->link_params.hw_led_mode,
9399 bp->link_params.chip_id);
9401 msleep_interruptible(500);
9402 if (signal_pending(current))
9406 if (bp->link_vars.link_up)
9407 bnx2x_set_led(bp, port, LED_MODE_OPER,
9408 bp->link_vars.line_speed,
9409 bp->link_params.hw_led_mode,
9410 bp->link_params.chip_id);
9415 static struct ethtool_ops bnx2x_ethtool_ops = {
9416 .get_settings = bnx2x_get_settings,
9417 .set_settings = bnx2x_set_settings,
9418 .get_drvinfo = bnx2x_get_drvinfo,
9419 .get_wol = bnx2x_get_wol,
9420 .set_wol = bnx2x_set_wol,
9421 .get_msglevel = bnx2x_get_msglevel,
9422 .set_msglevel = bnx2x_set_msglevel,
9423 .nway_reset = bnx2x_nway_reset,
9424 .get_link = ethtool_op_get_link,
9425 .get_eeprom_len = bnx2x_get_eeprom_len,
9426 .get_eeprom = bnx2x_get_eeprom,
9427 .set_eeprom = bnx2x_set_eeprom,
9428 .get_coalesce = bnx2x_get_coalesce,
9429 .set_coalesce = bnx2x_set_coalesce,
9430 .get_ringparam = bnx2x_get_ringparam,
9431 .set_ringparam = bnx2x_set_ringparam,
9432 .get_pauseparam = bnx2x_get_pauseparam,
9433 .set_pauseparam = bnx2x_set_pauseparam,
9434 .get_rx_csum = bnx2x_get_rx_csum,
9435 .set_rx_csum = bnx2x_set_rx_csum,
9436 .get_tx_csum = ethtool_op_get_tx_csum,
9437 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9438 .set_flags = bnx2x_set_flags,
9439 .get_flags = ethtool_op_get_flags,
9440 .get_sg = ethtool_op_get_sg,
9441 .set_sg = ethtool_op_set_sg,
9442 .get_tso = ethtool_op_get_tso,
9443 .set_tso = bnx2x_set_tso,
9444 .self_test_count = bnx2x_self_test_count,
9445 .self_test = bnx2x_self_test,
9446 .get_strings = bnx2x_get_strings,
9447 .phys_id = bnx2x_phys_id,
9448 .get_stats_count = bnx2x_get_stats_count,
9449 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9452 /* end of ethtool_ops */
9454 /****************************************************************************
9455 * General service functions
9456 ****************************************************************************/
9458 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9462 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9466 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9467 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9468 PCI_PM_CTRL_PME_STATUS));
9470 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9471 /* delay required during transition out of D3hot */
9476 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9480 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9482 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9485 /* No more memory access after this point until
9486 * device is brought back to D0.
9496 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9500 /* Tell compiler that status block fields can change */
9502 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;	/* skip the "next page" descriptor */
	return (fp->rx_comp_cons != rx_cons_sb);
9509 * net_device service functions
9512 static int bnx2x_poll(struct napi_struct *napi, int budget)
9514 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9516 struct bnx2x *bp = fp->bp;
9519 #ifdef BNX2X_STOP_ON_ERROR
9520 if (unlikely(bp->panic))
9524 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9525 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9526 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9528 bnx2x_update_fpsb_idx(fp);
9530 if (bnx2x_has_tx_work(fp))
9531 bnx2x_tx_int(fp, budget);
9533 if (bnx2x_has_rx_work(fp))
9534 work_done = bnx2x_rx_int(fp, budget);
9535 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9537 /* must not complete if we consumed full budget */
9538 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9540 #ifdef BNX2X_STOP_ON_ERROR
9543 napi_complete(napi);
9545 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9546 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9547 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9548 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
/* We split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
9560 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9561 struct bnx2x_fastpath *fp,
9562 struct eth_tx_bd **tx_bd, u16 hlen,
9563 u16 bd_prod, int nbd)
9565 struct eth_tx_bd *h_tx_bd = *tx_bd;
9566 struct eth_tx_bd *d_tx_bd;
9568 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9570 /* first fix first BD */
9571 h_tx_bd->nbd = cpu_to_le16(nbd);
9572 h_tx_bd->nbytes = cpu_to_le16(hlen);
9574 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9575 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9576 h_tx_bd->addr_lo, h_tx_bd->nbd);
9578 /* now get a new data BD
9579 * (after the pbd) and fill it */
9580 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9581 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9583 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9584 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9586 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9587 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9588 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	/* This marks the BD as one that has no individual mapping;
	 * the FW ignores this flag in a BD that is not marked as a start BD.
	 */
9593 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9594 DP(NETIF_MSG_TX_QUEUED,
9595 "TSO split data size is %d (%x:%x)\n",
9596 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9598 /* update tx_bd for marking the last BD flag */
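	/* Resulting layout (one DMA mapping shared by both BDs):
	 *   h_tx_bd: addr,        nbytes = hlen           (headers)
	 *   d_tx_bd: addr + hlen, nbytes = old_len - hlen (payload)
	 */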
9604 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9607 csum = (u16) ~csum_fold(csum_sub(csum,
9608 csum_partial(t_header - fix, fix, 0)));
9611 csum = (u16) ~csum_fold(csum_add(csum,
9612 csum_partial(t_header, -fix, 0)));
9614 return swab16(csum);
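#if 0
/* Illustrative sketch (not built): the arithmetic bnx2x_csum_fix
 * relies on.  Internet checksums are one's-complement sums, so the
 * checksum of bytes [B..C) can be derived from the checksum of [A..C)
 * by subtracting the partial sum of the prefix [A..B) - which is what
 * csum_sub()/csum_add() plus csum_fold() do above.  demo_sum() is a
 * simplified csum_partial() for even-length buffers.
 */
static u32 demo_sum(const u8 *p, int len)	/* 32-bit accumulator */
{
	u32 sum = 0;

	for (; len > 1; len -= 2, p += 2)
		sum += (p[0] << 8) | p[1];
	return sum;
}

static u16 demo_fold(u32 sum)			/* fold to 16 bits */
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (u16)sum;
}

/* For an even-length prefix n1:
 *   demo_sum(A, n1 + n2) == demo_sum(A, n1) + demo_sum(A + n1, n2)
 * and this additivity (surviving the fold) is what makes the prefix
 * subtraction in bnx2x_csum_fix() valid.
 */
#endif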
9617 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9621 if (skb->ip_summed != CHECKSUM_PARTIAL)
9625 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9627 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9628 rc |= XMIT_CSUM_TCP;
9632 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9633 rc |= XMIT_CSUM_TCP;
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;
9646 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9647 /* check if packet requires linearization (packet is too fragmented) */
9648 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9653 int first_bd_sz = 0;
9655 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9656 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9658 if (xmit_type & XMIT_GSO) {
9659 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9660 /* Check if LSO packet needs to be copied:
9661 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9662 int wnd_size = MAX_FETCH_BD - 3;
9663 /* Number of windows to check */
9664 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9669 /* Headers length */
9670 hlen = (int)(skb_transport_header(skb) - skb->data) +
		/* Amount of data (w/o headers) in the linear part of the SKB */
9674 first_bd_sz = skb_headlen(skb) - hlen;
9676 wnd_sum = first_bd_sz;
9678 /* Calculate the first sum - it's special */
9679 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9681 skb_shinfo(skb)->frags[frag_idx].size;
		/* If there was data in the linear part of the skb - check it */
9684 if (first_bd_sz > 0) {
9685 if (unlikely(wnd_sum < lso_mss)) {
9690 wnd_sum -= first_bd_sz;
9693 /* Others are easier: run through the frag list and
9694 check all windows */
9695 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9697 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9699 if (unlikely(wnd_sum < lso_mss)) {
9704 skb_shinfo(skb)->frags[wnd_idx].size;
		/* a non-LSO packet that is too fragmented should
		   always be linearized */
9715 if (unlikely(to_copy))
9716 DP(NETIF_MSG_TX_QUEUED,
9717 "Linearization IS REQUIRED for %s packet. "
9718 "num_frags %d hlen %d first_bd_sz %d\n",
9719 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9720 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
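/* The invariant the window sums above verify: the FW can fetch at most
 * MAX_FETCH_BD BDs while building one LSO segment, so every window of
 * (MAX_FETCH_BD - 3) consecutive data BDs must cover at least one MSS
 * worth of payload.  If any window falls short, the skb is copied into
 * a single linear buffer instead.
 */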
9726 /* called with netif_tx_lock
9727 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9728 * netif_wake_queue()
9730 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9732 struct bnx2x *bp = netdev_priv(dev);
9733 struct bnx2x_fastpath *fp;
9734 struct netdev_queue *txq;
9735 struct sw_tx_bd *tx_buf;
9736 struct eth_tx_bd *tx_bd;
9737 struct eth_tx_parse_bd *pbd = NULL;
9738 u16 pkt_prod, bd_prod;
9741 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9742 int vlan_off = (bp->e1hov ? 4 : 0);
9746 #ifdef BNX2X_STOP_ON_ERROR
9747 if (unlikely(bp->panic))
9748 return NETDEV_TX_BUSY;
9751 fp_index = skb_get_queue_mapping(skb);
9752 txq = netdev_get_tx_queue(dev, fp_index);
9754 fp = &bp->fp[fp_index];
9756 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		bp->eth_stats.driver_xoff++;
9758 netif_tx_stop_queue(txq);
9759 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9760 return NETDEV_TX_BUSY;
9763 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9764 " gso type %x xmit_type %x\n",
9765 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9766 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9768 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9769 /* First, check if we need to linearize the skb
9770 (due to FW restrictions) */
9771 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9772 /* Statistics of linearization */
9774 if (skb_linearize(skb) != 0) {
9775 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9776 "silently dropping this SKB\n");
9777 dev_kfree_skb_any(skb);
9778 return NETDEV_TX_OK;
9784 Please read carefully. First we use one BD which we mark as start,
9785 then for TSO or xsum we have a parsing info BD,
9786 and only then we have the rest of the TSO BDs.
9787 (don't forget to mark the last one as last,
9788 and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
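	A rough picture of the chain built below (the parse BD is present
	only for csum/TSO, and bnx2x_tx_split() may split the first data
	BD when the TSO headers spill past it):

	  [start BD] -> [parse BD] -> [data BD] -> ... -> [data BD, last]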
9792 pkt_prod = fp->tx_pkt_prod++;
9793 bd_prod = TX_BD(fp->tx_bd_prod);
9795 /* get a tx_buf and first BD */
9796 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9797 tx_bd = &fp->tx_desc_ring[bd_prod];
9799 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9800 tx_bd->general_data = (UNICAST_ADDRESS <<
9801 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9803 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9805 /* remember the first BD of the packet */
9806 tx_buf->first_bd = fp->tx_bd_prod;
9809 DP(NETIF_MSG_TX_QUEUED,
9810 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9811 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9814 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9815 (bp->flags & HW_VLAN_TX_FLAG)) {
9816 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9817 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9821 tx_bd->vlan = cpu_to_le16(pkt_prod);
9824 /* turn on parsing and get a BD */
9825 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9826 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9828 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9831 if (xmit_type & XMIT_CSUM) {
9832 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9834 /* for now NS flag is not used in Linux */
9835 pbd->global_data = (hlen |
9836 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9837 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9839 pbd->ip_hlen = (skb_transport_header(skb) -
9840 skb_network_header(skb)) / 2;
9842 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9844 pbd->total_hlen = cpu_to_le16(hlen);
9845 hlen = hlen*2 - vlan_off;
9847 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9849 if (xmit_type & XMIT_CSUM_V4)
9850 tx_bd->bd_flags.as_bitfield |=
9851 ETH_TX_BD_FLAGS_IP_CSUM;
9853 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9855 if (xmit_type & XMIT_CSUM_TCP) {
9856 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9859 s8 fix = SKB_CS_OFF(skb); /* signed! */
9861 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9862 pbd->cs_offset = fix / 2;
9864 DP(NETIF_MSG_TX_QUEUED,
9865 "hlen %d offset %d fix %d csum before fix %x\n",
9866 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9869 /* HW bug: fixup the CSUM */
9870 pbd->tcp_pseudo_csum =
9871 bnx2x_csum_fix(skb_transport_header(skb),
9874 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9875 pbd->tcp_pseudo_csum);
9879 mapping = pci_map_single(bp->pdev, skb->data,
9880 skb_headlen(skb), PCI_DMA_TODEVICE);
9882 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9883 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9884 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9885 tx_bd->nbd = cpu_to_le16(nbd);
9886 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9888 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9889 " nbytes %d flags %x vlan %x\n",
9890 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9891 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9892 le16_to_cpu(tx_bd->vlan));
9894 if (xmit_type & XMIT_GSO) {
9896 DP(NETIF_MSG_TX_QUEUED,
9897 "TSO packet len %d hlen %d total len %d tso size %d\n",
9898 skb->len, hlen, skb_headlen(skb),
9899 skb_shinfo(skb)->gso_size);
9901 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9903 if (unlikely(skb_headlen(skb) > hlen))
9904 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9907 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9908 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9909 pbd->tcp_flags = pbd_tcp_flags(skb);
9911 if (xmit_type & XMIT_GSO_V4) {
9912 pbd->ip_id = swab16(ip_hdr(skb)->id);
9913 pbd->tcp_pseudo_csum =
9914 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9916 0, IPPROTO_TCP, 0));
9919 pbd->tcp_pseudo_csum =
9920 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9921 &ipv6_hdr(skb)->daddr,
9922 0, IPPROTO_TCP, 0));
9924 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
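		/* Note: the pseudo checksum above is computed with a zero
		 * length; the PSEUDO_CS_WITHOUT_LEN flag presumably tells
		 * the FW/HW to add each segment's own length when it
		 * checksums the generated segments.
		 */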
9927 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9928 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9930 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9931 tx_bd = &fp->tx_desc_ring[bd_prod];
9933 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9934 frag->size, PCI_DMA_TODEVICE);
9936 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9937 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9938 tx_bd->nbytes = cpu_to_le16(frag->size);
9939 tx_bd->vlan = cpu_to_le16(pkt_prod);
9940 tx_bd->bd_flags.as_bitfield = 0;
9942 DP(NETIF_MSG_TX_QUEUED,
9943 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9944 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9945 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	/* finally, mark this BD as the last one */
9949 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9951 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9952 tx_bd, tx_bd->bd_flags.as_bitfield);
9954 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9956 /* now send a tx doorbell, counting the next BD
9957 * if the packet contains or ends with it
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;
9963 DP(NETIF_MSG_TX_QUEUED,
9964 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9965 " tcp_flags %x xsum %x seq %u hlen %u\n",
9966 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9967 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9968 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9970 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9973 * Make sure that the BD data is updated before updating the producer
9974 * since FW might read the BD right after the producer is updated.
9975 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
9981 fp->hw_tx_prods->bds_prod =
9982 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9983 mb(); /* FW restriction: must not reorder writing nbd and packets */
9984 fp->hw_tx_prods->packets_prod =
9985 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9986 DOORBELL(bp, FP_IDX(fp), 0);
9990 fp->tx_bd_prod += nbd;
9991 dev->trans_start = jiffies;
9993 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9994 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
9998 bp->eth_stats.driver_xoff++;
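		/* Re-check after stopping the queue: a concurrent
		 * bnx2x_tx_int() may have freed descriptors between the
		 * availability test and netif_tx_stop_queue(), and it only
		 * wakes queues it observes as stopped.
		 */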
9999 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10000 netif_tx_wake_queue(txq);
10004 return NETDEV_TX_OK;
10007 /* called with rtnl_lock */
10008 static int bnx2x_open(struct net_device *dev)
10010 struct bnx2x *bp = netdev_priv(dev);
10012 netif_carrier_off(dev);
10014 bnx2x_set_power_state(bp, PCI_D0);
10016 return bnx2x_nic_load(bp, LOAD_OPEN);
10019 /* called with rtnl_lock */
10020 static int bnx2x_close(struct net_device *dev)
10022 struct bnx2x *bp = netdev_priv(dev);
10024 /* Unload the driver, release IRQs */
10025 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10026 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10027 if (!CHIP_REV_IS_SLOW(bp))
10028 bnx2x_set_power_state(bp, PCI_D3hot);
10033 /* called with netif_tx_lock from set_multicast */
10034 static void bnx2x_set_rx_mode(struct net_device *dev)
10036 struct bnx2x *bp = netdev_priv(dev);
10037 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10038 int port = BP_PORT(bp);
10040 if (bp->state != BNX2X_STATE_OPEN) {
10041 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10045 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10047 if (dev->flags & IFF_PROMISC)
10048 rx_mode = BNX2X_RX_MODE_PROMISC;
10050 else if ((dev->flags & IFF_ALLMULTI) ||
10051 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10052 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10054 else { /* some multicasts */
10055 if (CHIP_IS_E1(bp)) {
10056 int i, old, offset;
10057 struct dev_mc_list *mclist;
10058 struct mac_configuration_cmd *config =
10059 bnx2x_sp(bp, mcast_config);
10061 for (i = 0, mclist = dev->mc_list;
10062 mclist && (i < dev->mc_count);
10063 i++, mclist = mclist->next) {
10065 config->config_table[i].
10066 cam_entry.msb_mac_addr =
10067 swab16(*(u16 *)&mclist->dmi_addr[0]);
10068 config->config_table[i].
10069 cam_entry.middle_mac_addr =
10070 swab16(*(u16 *)&mclist->dmi_addr[2]);
10071 config->config_table[i].
10072 cam_entry.lsb_mac_addr =
10073 swab16(*(u16 *)&mclist->dmi_addr[4]);
10074 config->config_table[i].cam_entry.flags =
10076 config->config_table[i].
10077 target_table_entry.flags = 0;
10078 config->config_table[i].
10079 target_table_entry.client_id = 0;
10080 config->config_table[i].
10081 target_table_entry.vlan_id = 0;
10084 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10085 config->config_table[i].
10086 cam_entry.msb_mac_addr,
10087 config->config_table[i].
10088 cam_entry.middle_mac_addr,
10089 config->config_table[i].
10090 cam_entry.lsb_mac_addr);
10092 old = config->hdr.length;
10094 for (; i < old; i++) {
10095 if (CAM_IS_INVALID(config->
10096 config_table[i])) {
10097 /* already invalidated */
10101 CAM_INVALIDATE(config->
10106 if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);
10111 config->hdr.length = i;
10112 config->hdr.offset = offset;
10113 config->hdr.client_id = bp->fp->cl_id;
10114 config->hdr.reserved1 = 0;
10116 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10117 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10118 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10121 /* Accept one or more multicasts */
10122 struct dev_mc_list *mclist;
10123 u32 mc_filter[MC_HASH_SIZE];
10124 u32 crc, bit, regidx;
10127 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10129 for (i = 0, mclist = dev->mc_list;
10130 mclist && (i < dev->mc_count);
10131 i++, mclist = mclist->next) {
10133 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10136 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
			bit = (crc >> 24) & 0xff;
			regidx = bit >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
10143 for (i = 0; i < MC_HASH_SIZE; i++)
10144 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10149 bp->rx_mode = rx_mode;
10150 bnx2x_set_storm_rx_mode(bp);
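#if 0
/* Illustrative sketch (not built): how one multicast MAC lands in the
 * 256-bit approximate hash filter programmed above.  The top byte of
 * the little-endian CRC32c of the address selects one of 256 bits,
 * spread over MC_HASH_SIZE (8) 32-bit registers.
 */
static void demo_mc_hash_set(u32 mc_filter[8], const u8 mac[6])
{
	u32 crc = crc32c_le(0, mac, 6);
	u32 bit = (crc >> 24) & 0xff;	/* bit index 0..255 */

	mc_filter[bit >> 5] |= 1U << (bit & 0x1f);
}
#endif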
10153 /* called with rtnl_lock */
10154 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10156 struct sockaddr *addr = p;
10157 struct bnx2x *bp = netdev_priv(dev);
10159 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10162 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10163 if (netif_running(dev)) {
10164 if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
10173 /* called with rtnl_lock */
10174 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10176 struct mii_ioctl_data *data = if_mii(ifr);
10177 struct bnx2x *bp = netdev_priv(dev);
10178 int port = BP_PORT(bp);
10183 data->phy_id = bp->port.phy_addr;
10187 case SIOCGMIIREG: {
10190 if (!netif_running(dev))
10193 mutex_lock(&bp->port.phy_mutex);
10194 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10195 DEFAULT_PHY_DEV_ADDR,
10196 (data->reg_num & 0x1f), &mii_regval);
10197 data->val_out = mii_regval;
10198 mutex_unlock(&bp->port.phy_mutex);
10203 if (!capable(CAP_NET_ADMIN))
10206 if (!netif_running(dev))
10209 mutex_lock(&bp->port.phy_mutex);
10210 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10211 DEFAULT_PHY_DEV_ADDR,
10212 (data->reg_num & 0x1f), data->val_in);
10213 mutex_unlock(&bp->port.phy_mutex);
10221 return -EOPNOTSUPP;
10224 /* called with rtnl_lock */
10225 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10227 struct bnx2x *bp = netdev_priv(dev);
10230 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10231 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10234 /* This does not race with packet allocation
10235 * because the actual alloc size is
10236 * only updated as part of load
10238 dev->mtu = new_mtu;
10240 if (netif_running(dev)) {
10241 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10242 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10248 static void bnx2x_tx_timeout(struct net_device *dev)
10250 struct bnx2x *bp = netdev_priv(dev);
10252 #ifdef BNX2X_STOP_ON_ERROR
10256 /* This allows the netif to be shutdown gracefully before resetting */
10257 schedule_work(&bp->reset_task);
10261 /* called with rtnl_lock */
10262 static void bnx2x_vlan_rx_register(struct net_device *dev,
10263 struct vlan_group *vlgrp)
10265 struct bnx2x *bp = netdev_priv(dev);
10269 /* Set flags according to the required capabilities */
10270 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10272 if (dev->features & NETIF_F_HW_VLAN_TX)
10273 bp->flags |= HW_VLAN_TX_FLAG;
10275 if (dev->features & NETIF_F_HW_VLAN_RX)
10276 bp->flags |= HW_VLAN_RX_FLAG;
10278 if (netif_running(dev))
10279 bnx2x_set_client_config(bp);
10284 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10285 static void poll_bnx2x(struct net_device *dev)
10287 struct bnx2x *bp = netdev_priv(dev);
10289 disable_irq(bp->pdev->irq);
10290 bnx2x_interrupt(bp->pdev->irq, dev);
10291 enable_irq(bp->pdev->irq);
10295 static const struct net_device_ops bnx2x_netdev_ops = {
10296 .ndo_open = bnx2x_open,
10297 .ndo_stop = bnx2x_close,
10298 .ndo_start_xmit = bnx2x_start_xmit,
10299 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10300 .ndo_set_mac_address = bnx2x_change_mac_addr,
10301 .ndo_validate_addr = eth_validate_addr,
10302 .ndo_do_ioctl = bnx2x_ioctl,
10303 .ndo_change_mtu = bnx2x_change_mtu,
10304 .ndo_tx_timeout = bnx2x_tx_timeout,
10306 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10308 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10309 .ndo_poll_controller = poll_bnx2x,
10314 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10315 struct net_device *dev)
10320 SET_NETDEV_DEV(dev, &pdev->dev);
10321 bp = netdev_priv(dev);
10326 bp->func = PCI_FUNC(pdev->devfn);
10328 rc = pci_enable_device(pdev);
10330 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10334 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10335 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10338 goto err_out_disable;
10341 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10342 printk(KERN_ERR PFX "Cannot find second PCI device"
10343 " base address, aborting\n");
10345 goto err_out_disable;
10348 if (atomic_read(&pdev->enable_cnt) == 1) {
10349 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10351 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10353 goto err_out_disable;
10356 pci_set_master(pdev);
10357 pci_save_state(pdev);
10360 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10361 if (bp->pm_cap == 0) {
10362 printk(KERN_ERR PFX "Cannot find power management"
10363 " capability, aborting\n");
10365 goto err_out_release;
10368 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10369 if (bp->pcie_cap == 0) {
10370 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10373 goto err_out_release;
10376 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10377 bp->flags |= USING_DAC_FLAG;
10378 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10379 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10380 " failed, aborting\n");
10382 goto err_out_release;
10385 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10386 printk(KERN_ERR PFX "System does not support DMA,"
10389 goto err_out_release;
10392 dev->mem_start = pci_resource_start(pdev, 0);
10393 dev->base_addr = dev->mem_start;
10394 dev->mem_end = pci_resource_end(pdev, 0);
10396 dev->irq = pdev->irq;
10398 bp->regview = pci_ioremap_bar(pdev, 0);
10399 if (!bp->regview) {
10400 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10402 goto err_out_release;
10405 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10406 min_t(u64, BNX2X_DB_SIZE,
10407 pci_resource_len(pdev, 2)));
10408 if (!bp->doorbells) {
10409 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10411 goto err_out_unmap;
10414 bnx2x_set_power_state(bp, PCI_D0);
10416 /* clean indirect addresses */
10417 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10418 PCICFG_VENDOR_ID_OFFSET);
10419 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10420 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10421 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10422 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10424 dev->watchdog_timeo = TX_TIMEOUT;
10426 dev->netdev_ops = &bnx2x_netdev_ops;
10427 dev->ethtool_ops = &bnx2x_ethtool_ops;
10428 dev->features |= NETIF_F_SG;
10429 dev->features |= NETIF_F_HW_CSUM;
10430 if (bp->flags & USING_DAC_FLAG)
10431 dev->features |= NETIF_F_HIGHDMA;
10433 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10434 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10436 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10437 dev->features |= NETIF_F_TSO6;
10443 iounmap(bp->regview);
10444 bp->regview = NULL;
10446 if (bp->doorbells) {
10447 iounmap(bp->doorbells);
10448 bp->doorbells = NULL;
10452 if (atomic_read(&pdev->enable_cnt) == 1)
10453 pci_release_regions(pdev);
10456 pci_disable_device(pdev);
10457 pci_set_drvdata(pdev, NULL);
10463 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10465 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10467 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
/* return value: 1 = 2.5GHz, 2 = 5GHz */
10472 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10474 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10476 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10480 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10481 const struct pci_device_id *ent)
10483 static int version_printed;
10484 struct net_device *dev = NULL;
10488 if (version_printed++ == 0)
10489 printk(KERN_INFO "%s", version);
10491 /* dev zeroed in init_etherdev */
10492 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
10494 printk(KERN_ERR PFX "Cannot allocate net device\n");
10498 bp = netdev_priv(dev);
10499 bp->msglevel = debug;
10501 rc = bnx2x_init_dev(pdev, dev);
10507 pci_set_drvdata(pdev, dev);
10509 rc = bnx2x_init_bp(bp);
10511 goto init_one_exit;
10513 rc = register_netdev(dev);
10515 dev_err(&pdev->dev, "Cannot register net device\n");
10516 goto init_one_exit;
10519 bp->common.name = board_info[ent->driver_data].name;
10520 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10521 " IRQ %d, ", dev->name, bp->common.name,
10522 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10523 bnx2x_get_pcie_width(bp),
10524 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10525 dev->base_addr, bp->pdev->irq);
10526 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10531 iounmap(bp->regview);
10534 iounmap(bp->doorbells);
10538 if (atomic_read(&pdev->enable_cnt) == 1)
10539 pci_release_regions(pdev);
10541 pci_disable_device(pdev);
10542 pci_set_drvdata(pdev, NULL);
10547 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10549 struct net_device *dev = pci_get_drvdata(pdev);
10553 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10556 bp = netdev_priv(dev);
10558 unregister_netdev(dev);
10561 iounmap(bp->regview);
10564 iounmap(bp->doorbells);
10568 if (atomic_read(&pdev->enable_cnt) == 1)
10569 pci_release_regions(pdev);
10571 pci_disable_device(pdev);
10572 pci_set_drvdata(pdev, NULL);
10575 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10577 struct net_device *dev = pci_get_drvdata(pdev);
10581 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10584 bp = netdev_priv(dev);
10588 pci_save_state(pdev);
10590 if (!netif_running(dev)) {
10595 netif_device_detach(dev);
10597 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10599 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10606 static int bnx2x_resume(struct pci_dev *pdev)
10608 struct net_device *dev = pci_get_drvdata(pdev);
10613 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10616 bp = netdev_priv(dev);
10620 pci_restore_state(pdev);
10622 if (!netif_running(dev)) {
10627 bnx2x_set_power_state(bp, PCI_D0);
10628 netif_device_attach(dev);
10630 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10637 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10641 bp->state = BNX2X_STATE_ERROR;
10643 bp->rx_mode = BNX2X_RX_MODE_NONE;
10645 bnx2x_netif_stop(bp, 0);
10647 del_timer_sync(&bp->timer);
10648 bp->stats_state = STATS_STATE_DISABLED;
10649 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10652 bnx2x_free_irq(bp);
10654 if (CHIP_IS_E1(bp)) {
10655 struct mac_configuration_cmd *config =
10656 bnx2x_sp(bp, mcast_config);
10658 for (i = 0; i < config->hdr.length; i++)
10659 CAM_INVALIDATE(config->config_table[i]);
10662 /* Free SKBs, SGEs, TPA pool and driver internals */
10663 bnx2x_free_skbs(bp);
10664 for_each_rx_queue(bp, i)
10665 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10666 for_each_rx_queue(bp, i)
10667 netif_napi_del(&bnx2x_fp(bp, i, napi));
10668 bnx2x_free_mem(bp);
10670 bp->state = BNX2X_STATE_CLOSED;
10672 netif_carrier_off(bp->dev);
10677 static void bnx2x_eeh_recover(struct bnx2x *bp)
10681 mutex_init(&bp->port.phy_mutex);
10683 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10684 bp->link_params.shmem_base = bp->common.shmem_base;
10685 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10687 if (!bp->common.shmem_base ||
10688 (bp->common.shmem_base < 0xA0000) ||
10689 (bp->common.shmem_base >= 0xC0000)) {
10690 BNX2X_DEV_INFO("MCP not active\n");
10691 bp->flags |= NO_MCP_FLAG;
10695 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10696 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10697 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10698 BNX2X_ERR("BAD MCP validity signature\n");
10700 if (!BP_NOMCP(bp)) {
10701 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10702 & DRV_MSG_SEQ_NUMBER_MASK);
10703 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10708 * bnx2x_io_error_detected - called when PCI error is detected
10709 * @pdev: Pointer to PCI device
10710 * @state: The current pci connection state
10712 * This function is called after a PCI bus error affecting
10713 * this device has been detected.
10715 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10716 pci_channel_state_t state)
10718 struct net_device *dev = pci_get_drvdata(pdev);
10719 struct bnx2x *bp = netdev_priv(dev);
10723 netif_device_detach(dev);
10725 if (netif_running(dev))
10726 bnx2x_eeh_nic_unload(bp);
10728 pci_disable_device(pdev);
10732 /* Request a slot reset */
10733 return PCI_ERS_RESULT_NEED_RESET;
10737 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10738 * @pdev: Pointer to PCI device
10740 * Restart the card from scratch, as if from a cold-boot.
10742 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10744 struct net_device *dev = pci_get_drvdata(pdev);
10745 struct bnx2x *bp = netdev_priv(dev);
10749 if (pci_enable_device(pdev)) {
10750 dev_err(&pdev->dev,
10751 "Cannot re-enable PCI device after reset\n");
10753 return PCI_ERS_RESULT_DISCONNECT;
10756 pci_set_master(pdev);
10757 pci_restore_state(pdev);
10759 if (netif_running(dev))
10760 bnx2x_set_power_state(bp, PCI_D0);
10764 return PCI_ERS_RESULT_RECOVERED;
10768 * bnx2x_io_resume - called when traffic can start flowing again
10769 * @pdev: Pointer to PCI device
10771 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
10774 static void bnx2x_io_resume(struct pci_dev *pdev)
10776 struct net_device *dev = pci_get_drvdata(pdev);
10777 struct bnx2x *bp = netdev_priv(dev);
10781 bnx2x_eeh_recover(bp);
10783 if (netif_running(dev))
10784 bnx2x_nic_load(bp, LOAD_NORMAL);
10786 netif_device_attach(dev);
10791 static struct pci_error_handlers bnx2x_err_handler = {
10792 .error_detected = bnx2x_io_error_detected,
10793 .slot_reset = bnx2x_io_slot_reset,
10794 .resume = bnx2x_io_resume,
10797 static struct pci_driver bnx2x_pci_driver = {
10798 .name = DRV_MODULE_NAME,
10799 .id_table = bnx2x_pci_tbl,
10800 .probe = bnx2x_init_one,
10801 .remove = __devexit_p(bnx2x_remove_one),
10802 .suspend = bnx2x_suspend,
10803 .resume = bnx2x_resume,
10804 .err_handler = &bnx2x_err_handler,
10807 static int __init bnx2x_init(void)
10809 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10810 if (bnx2x_wq == NULL) {
10811 printk(KERN_ERR PFX "Cannot create workqueue\n");
10815 return pci_register_driver(&bnx2x_pci_driver);
10818 static void __exit bnx2x_cleanup(void)
10820 pci_unregister_driver(&bnx2x_pci_driver);
10822 destroy_workqueue(bnx2x_wq);
10825 module_init(bnx2x_init);
10826 module_exit(bnx2x_cleanup);