1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
52 #include <linux/stringify.h>
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_dump.h"
59 #include "bnx2x_cmn.h"
61 #define DRV_MODULE_VERSION "1.52.53-1"
62 #define DRV_MODULE_RELDATE "2010/18/04"
63 #define BNX2X_BC_VER 0x040200
65 #include <linux/firmware.h>
66 #include "bnx2x_fw_file_hdr.h"
68 #define FW_FILE_VERSION \
69 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
70 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
71 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
72 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
73 #define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
74 #define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
76 /* Time in jiffies before concluding the transmitter is hung */
77 #define TX_TIMEOUT (5*HZ)
79 static char version[] __devinitdata =
80 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
81 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
83 MODULE_AUTHOR("Eliezer Tamir");
84 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
85 MODULE_LICENSE("GPL");
86 MODULE_VERSION(DRV_MODULE_VERSION);
87 MODULE_FIRMWARE(FW_FILE_NAME_E1);
88 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
90 static int multi_mode = 1;
91 module_param(multi_mode, int, 0);
92 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
93 "(0 Disable; 1 Enable (default))");
95 static int num_queues;
96 module_param(num_queues, int, 0);
97 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
98 " (default is as a number of CPUs)");
100 static int disable_tpa;
101 module_param(disable_tpa, int, 0);
102 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
105 module_param(int_mode, int, 0);
106 MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
109 static int dropless_fc;
110 module_param(dropless_fc, int, 0);
111 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
114 module_param(poll, int, 0);
115 MODULE_PARM_DESC(poll, " Use polling (for debug)");
117 static int mrrs = -1;
118 module_param(mrrs, int, 0);
119 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
122 module_param(debug, int, 0);
123 MODULE_PARM_DESC(debug, " Default debug msglevel");
125 static struct workqueue_struct *bnx2x_wq;
127 enum bnx2x_board_type {
133 /* indexed by board_type, above */
136 } board_info[] __devinitdata = {
137 { "Broadcom NetXtreme II BCM57710 XGb" },
138 { "Broadcom NetXtreme II BCM57711 XGb" },
139 { "Broadcom NetXtreme II BCM57711E XGb" }
143 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
144 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
150 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
152 /****************************************************************************
153 * General service functions
154 ****************************************************************************/
157 * locking is done by mcp
/*
 * bnx2x_reg_wr_ind() - indirect GRC register write via PCI config space.
 * Points the PCICFG_GRC_ADDRESS window at @addr, writes @val through
 * PCICFG_GRC_DATA, then parks the window back at PCICFG_VENDOR_ID_OFFSET
 * so a stray config read cannot hit device internals.
 * NOTE(review): this listing elides lines (gaps in the inline numbering);
 * the function braces are not visible here — code kept verbatim.
 */
159 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
161 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
162 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
163 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
164 PCICFG_VENDOR_ID_OFFSET);
/*
 * bnx2x_reg_rd_ind() - indirect GRC register read via PCI config space.
 * Reads the dword at GRC offset @addr through the PCICFG_GRC window and
 * restores the window to PCICFG_VENDOR_ID_OFFSET afterwards.
 * NOTE(review): the declaration of 'val' and the 'return val;' line are
 * elided in this listing (numbering gaps) — code kept verbatim.
 */
167 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
171 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
172 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
173 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
174 PCICFG_VENDOR_ID_OFFSET);
/* "GO" doorbell registers for the 16 DMAE channels, indexed by channel
 * number; bnx2x_post_dmae() writes 1 here to kick a posted command. */
179 static const u32 dmae_reg_go_c[] = {
180 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
181 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
182 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
183 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
/* copy command into DMAE command memory and set DMAE command go */
/*
 * The dmae_command struct is copied dword-by-dword into channel @idx's
 * slot in DMAE_REG_CMD_MEM, then that channel's GO register is written
 * to start the transfer.  Completion is polled by the callers.
 */
187 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
193 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
194 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
195 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
197 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
198 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
/* kick the channel */
200 REG_WR(bp, dmae_reg_go_c[idx], 1);
/*
 * bnx2x_write_dmae() - DMA @len32 dwords from host memory @dma_addr to
 * GRC address @dst_addr using the DMAE block.
 * Falls back to indirect register writes (bnx2x_init_ind_wr) while
 * bp->dmae_ready is not set, then posts a SRC_PCI->DST_GRC command and
 * busy-polls the slowpath wb_comp word for DMAE_COMP_VAL under
 * bp->dmae_mutex.
 * NOTE(review): several lines are elided in this listing (e.g. the
 * 'dmae.len' assignment, timeout-counter logic, #ifdef endianity
 * selection) — code kept verbatim.
 */
203 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
206 struct dmae_command dmae;
207 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
/* DMAE not usable yet (e.g. early init) - do it the slow way */
210 if (!bp->dmae_ready) {
211 u32 *data = bnx2x_sp(bp, wb_data[0]);
213 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
214 " using indirect\n", dst_addr, len32);
215 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
219 memset(&dmae, 0, sizeof(struct dmae_command));
221 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
222 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
223 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
225 DMAE_CMD_ENDIANITY_B_DW_SWAP |
227 DMAE_CMD_ENDIANITY_DW_SWAP |
229 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
230 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
231 dmae.src_addr_lo = U64_LO(dma_addr);
232 dmae.src_addr_hi = U64_HI(dma_addr);
/* GRC destination is expressed in dwords */
233 dmae.dst_addr_lo = dst_addr >> 2;
234 dmae.dst_addr_hi = 0;
236 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
237 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
238 dmae.comp_val = DMAE_COMP_VAL;
240 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
241 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
242 "dst_addr [%x:%08x (%08x)]\n"
243 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
244 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
245 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
246 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
247 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
248 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
249 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
/* serialize all DMAE users of this function's channel */
251 mutex_lock(&bp->dmae_mutex);
255 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
/* poll for hardware to stamp the completion value */
259 while (*wb_comp != DMAE_COMP_VAL) {
260 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
263 BNX2X_ERR("DMAE timeout!\n");
267 /* adjust delay for emulation/FPGA */
268 if (CHIP_REV_IS_SLOW(bp))
274 mutex_unlock(&bp->dmae_mutex);
/*
 * bnx2x_read_dmae() - DMA @len32 dwords from GRC address @src_addr into
 * the slowpath wb_data buffer using the DMAE block (SRC_GRC->DST_PCI).
 * Mirror image of bnx2x_write_dmae(): falls back to per-dword indirect
 * reads while bp->dmae_ready is not set, otherwise posts the command and
 * polls wb_comp for DMAE_COMP_VAL under bp->dmae_mutex.
 * NOTE(review): lines elided in this listing (dmae.len assignment,
 * timeout counter, #ifdef endianity branches) — code kept verbatim.
 */
277 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
279 struct dmae_command dmae;
280 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
283 if (!bp->dmae_ready) {
284 u32 *data = bnx2x_sp(bp, wb_data[0]);
287 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
288 " using indirect\n", src_addr, len32);
289 for (i = 0; i < len32; i++)
290 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
294 memset(&dmae, 0, sizeof(struct dmae_command));
296 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
297 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
298 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
300 DMAE_CMD_ENDIANITY_B_DW_SWAP |
302 DMAE_CMD_ENDIANITY_DW_SWAP |
304 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
305 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
/* GRC source is expressed in dwords */
306 dmae.src_addr_lo = src_addr >> 2;
307 dmae.src_addr_hi = 0;
308 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
309 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
311 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
312 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
313 dmae.comp_val = DMAE_COMP_VAL;
315 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
316 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
317 "dst_addr [%x:%08x (%08x)]\n"
318 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
319 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
320 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
321 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
323 mutex_lock(&bp->dmae_mutex);
/* clear the destination so stale data is never mistaken for a result */
325 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
328 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
332 while (*wb_comp != DMAE_COMP_VAL) {
335 BNX2X_ERR("DMAE timeout!\n");
339 /* adjust delay for emulation/FPGA */
340 if (CHIP_REV_IS_SLOW(bp))
345 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
346 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
347 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
349 mutex_unlock(&bp->dmae_mutex);
/*
 * bnx2x_write_dmae_phys_len() - write an arbitrarily long buffer via
 * DMAE by splitting it into chunks of at most DMAE_LEN32_WR_MAX dwords.
 * 'offset' advances in bytes (dwords * 4); the final partial chunk is
 * written after the loop.
 * NOTE(review): the declarations of 'offset'/'len' and the loop's
 * 'len -= dmae_wr_max;' line are elided in this listing.
 */
352 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
355 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
358 while (len > dmae_wr_max) {
359 bnx2x_write_dmae(bp, phys_addr + offset,
360 addr + offset, dmae_wr_max);
361 offset += dmae_wr_max * 4;
/* trailing remainder (<= dmae_wr_max dwords) */
365 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
/* used only for slowpath so not inlined */
/* Write a 64-bit wide-bus register as {hi, lo} dword pair via DMAE. */
369 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
373 wb_write[0] = val_hi;
374 wb_write[1] = val_lo;
375 REG_WR_DMAE(bp, reg, wb_write, 2);
/* Read a 64-bit wide-bus register (two dwords) via DMAE and combine
 * them with HILO_U64(hi, lo). */
379 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
383 REG_RD_DMAE(bp, reg, wb_data, 2);
385 return HILO_U64(wb_data[0], wb_data[1]);
/*
 * bnx2x_mc_assert() - dump firmware assert lists of the four storm
 * processors (XSTORM, TSTORM, CSTORM, USTORM).
 * For each storm: read the assert-list index, then walk the assert
 * array and print every 16-byte entry whose first dword is not
 * COMMON_ASM_INVALID_ASSERT_OPCODE.
 * NOTE(review): declarations, loop 'break's and the final return are
 * elided in this listing — code kept verbatim.  Presumably the return
 * value counts asserts found; confirm against the full source.
 */
389 static int bnx2x_mc_assert(struct bnx2x *bp)
393 u32 row0, row1, row2, row3;
/* XSTORM */
396 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
397 XSTORM_ASSERT_LIST_INDEX_OFFSET);
399 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
401 /* print the asserts */
402 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
404 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
405 XSTORM_ASSERT_LIST_OFFSET(i));
406 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
407 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
408 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
409 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
410 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
411 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
413 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
414 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
415 " 0x%08x 0x%08x 0x%08x\n",
416 i, row3, row2, row1, row0);
/* TSTORM */
424 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
425 TSTORM_ASSERT_LIST_INDEX_OFFSET);
427 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
429 /* print the asserts */
430 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
432 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
433 TSTORM_ASSERT_LIST_OFFSET(i));
434 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
435 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
436 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
437 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
438 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
439 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
441 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
442 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
443 " 0x%08x 0x%08x 0x%08x\n",
444 i, row3, row2, row1, row0);
/* CSTORM */
452 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
453 CSTORM_ASSERT_LIST_INDEX_OFFSET);
455 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
457 /* print the asserts */
458 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
460 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
461 CSTORM_ASSERT_LIST_OFFSET(i));
462 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
463 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
464 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
465 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
466 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
467 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
469 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
470 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
471 " 0x%08x 0x%08x 0x%08x\n",
472 i, row3, row2, row1, row0);
/* USTORM */
480 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
481 USTORM_ASSERT_LIST_INDEX_OFFSET);
483 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
485 /* print the asserts */
486 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
488 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
489 USTORM_ASSERT_LIST_OFFSET(i));
490 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
491 USTORM_ASSERT_LIST_OFFSET(i) + 4);
492 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
493 USTORM_ASSERT_LIST_OFFSET(i) + 8);
494 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
495 USTORM_ASSERT_LIST_OFFSET(i) + 12);
497 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
498 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
499 " 0x%08x 0x%08x 0x%08x\n",
500 i, row3, row2, row1, row0);
/*
 * bnx2x_fw_dump() - print the bootcode (MCP) trace buffer to the log.
 * Locates the trace "mark" just below shmem_base, then prints the
 * buffer in two passes (mark..shmem_base, then addr+4..mark) as raw
 * big-endian text, 8 dwords at a time.
 * NOTE(review): declarations of 'addr', 'mark', 'word', 'data[]' and
 * the "no MCP" early-return guard are elided in this listing.
 */
510 static void bnx2x_fw_dump(struct bnx2x *bp)
518 BNX2X_ERR("NO MCP - can not dump\n");
522 addr = bp->common.shmem_base - 0x0800 + 4;
523 mark = REG_RD(bp, addr);
/* translate mark into a scratchpad offset, dword-aligned upward */
524 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
525 pr_err("begin fw dump (mark 0x%x)\n", mark);
/* htonl keeps the byte order of the firmware's ASCII trace intact */
528 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
529 for (word = 0; word < 8; word++)
530 data[word] = htonl(REG_RD(bp, offset + 4*word));
532 pr_cont("%s", (char *)data);
534 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
535 for (word = 0; word < 8; word++)
536 data[word] = htonl(REG_RD(bp, offset + 4*word));
538 pr_cont("%s", (char *)data);
540 pr_err("end of fw dump\n");
/*
 * bnx2x_panic_dump() - emit a full crash dump of driver state to the log.
 * Disables statistics, prints the slow-path default indices, then for
 * every queue: the Rx/Tx producer/consumer indices, a window of Rx BDs,
 * SGEs and CQEs around the current consumer, and a window of Tx BDs.
 * NOTE(review): declarations of 'i', 'j', 'start', 'end' and the
 * trailing bnx2x_fw_dump()/bnx2x_mc_assert() calls appear to be elided
 * in this listing — code kept verbatim.
 */
543 static void bnx2x_panic_dump(struct bnx2x *bp)
548 bp->stats_state = STATS_STATE_DISABLED;
549 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
551 BNX2X_ERR("begin crash dump -----------------\n");
/* slow-path default status block indices */
555 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
556 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
557 " spq_prod_idx(0x%x)\n",
558 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
559 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
/* per-queue Rx indices */
562 for_each_queue(bp, i) {
563 struct bnx2x_fastpath *fp = &bp->fp[i];
565 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
566 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
567 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
568 i, fp->rx_bd_prod, fp->rx_bd_cons,
569 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
570 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
571 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
572 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
573 fp->rx_sge_prod, fp->last_max_sge,
574 le16_to_cpu(fp->fp_u_idx),
575 fp->status_blk->u_status_block.status_block_index);
/* per-queue Tx indices */
579 for_each_queue(bp, i) {
580 struct bnx2x_fastpath *fp = &bp->fp[i];
582 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
583 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
584 " *tx_cons_sb(0x%x)\n",
585 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
586 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
587 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
588 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
589 fp->status_blk->c_status_block.status_block_index,
590 fp->tx_db.data.prod);
/* Rx ring contents: BDs, SGEs and completion queue entries */
595 for_each_queue(bp, i) {
596 struct bnx2x_fastpath *fp = &bp->fp[i];
/* window of 10 BDs before and ~503 after the SB consumer */
598 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
599 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
600 for (j = start; j != end; j = RX_BD(j + 1)) {
601 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
602 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
604 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
605 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
608 start = RX_SGE(fp->rx_sge_prod);
609 end = RX_SGE(fp->last_max_sge);
610 for (j = start; j != end; j = RX_SGE(j + 1)) {
611 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
612 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
614 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
615 i, j, rx_sge[1], rx_sge[0], sw_page->page);
618 start = RCQ_BD(fp->rx_comp_cons - 10);
619 end = RCQ_BD(fp->rx_comp_cons + 503);
620 for (j = start; j != end; j = RCQ_BD(j + 1)) {
621 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
623 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
624 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
/* Tx ring contents: sw packet ring and hardware BDs */
629 for_each_queue(bp, i) {
630 struct bnx2x_fastpath *fp = &bp->fp[i];
632 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
633 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
634 for (j = start; j != end; j = TX_BD(j + 1)) {
635 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
637 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
638 i, j, sw_bd->skb, sw_bd->first_bd);
641 start = TX_BD(fp->tx_bd_cons - 10);
642 end = TX_BD(fp->tx_bd_cons + 254);
643 for (j = start; j != end; j = TX_BD(j + 1)) {
644 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
646 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
647 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
653 BNX2X_ERR("end crash dump -----------------\n");
/*
 * bnx2x_int_enable() - program the host coalescing (HC) block to enable
 * interrupts for this port in the active mode (MSI-X, MSI or INTx).
 * Also programs the leading/trailing attention edge registers on E1H
 * and includes an extra HC write that masks MSI/MSI-X during the
 * single-ISR warm-up (see the val &= ~..._MSI_MSIX_INT_EN_0 step).
 * NOTE(review): the if/else chain selecting among the three mode
 * branches and several closing braces are elided in this listing.
 */
656 void bnx2x_int_enable(struct bnx2x *bp)
658 int port = BP_PORT(bp);
659 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
660 u32 val = REG_RD(bp, addr);
661 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
662 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
/* MSI-X: single-ISR and INT line off, MSI/MSI-X + attention on */
665 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
666 HC_CONFIG_0_REG_INT_LINE_EN_0);
667 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
668 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
/* MSI: INT line off, single-ISR + MSI/MSI-X + attention on */
670 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
671 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
672 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
673 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
/* INTx: everything on */
675 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
676 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
677 HC_CONFIG_0_REG_INT_LINE_EN_0 |
678 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
680 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
683 REG_WR(bp, addr, val);
685 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
688 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
689 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
691 REG_WR(bp, addr, val);
693 * Ensure that HC_CONFIG is written before leading/trailing edge config
698 if (CHIP_IS_E1H(bp)) {
699 /* init leading/trailing edge */
/* 0xee0f plus this VN's attention bit */
701 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
703 /* enable nig and gpio3 attention */
708 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
709 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
712 /* Make sure that interrupts are indeed enabled from here on */
/*
 * bnx2x_int_disable() - turn off all interrupt sources in the HC block
 * for this port (single-ISR, MSI/MSI-X, INT line, attention bits) and
 * read back to verify the write actually landed in the IGU.
 */
716 static void bnx2x_int_disable(struct bnx2x *bp)
718 int port = BP_PORT(bp);
719 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
720 u32 val = REG_RD(bp, addr);
722 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
723 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
724 HC_CONFIG_0_REG_INT_LINE_EN_0 |
725 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
727 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
730 /* flush all outstanding writes */
733 REG_WR(bp, addr, val);
/* read-back verification of the disable */
734 if (REG_RD(bp, addr) != val)
735 BNX2X_ERR("BUG! proper val not read from IGU!\n");
/*
 * bnx2x_int_disable_sync() - disable interrupts and wait until no ISR
 * or slow-path work is still running.
 * Raises bp->intr_sem so ISRs bail out, optionally disables the HW
 * source (@disable_hw), synchronizes every vector (MSI-X table entries
 * or the legacy pdev->irq), then cancels/flushes the sp_task work.
 * NOTE(review): the MSI-X/INTx branch structure and the 'offset'
 * declaration are elided in this listing — code kept verbatim.
 */
738 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
740 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
743 /* disable interrupt handling */
744 atomic_inc(&bp->intr_sem);
745 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
748 /* prevent the HW from sending interrupts */
749 bnx2x_int_disable(bp);
751 /* make sure all ISRs are done */
753 synchronize_irq(bp->msix_table[0].vector);
758 for_each_queue(bp, i)
759 synchronize_irq(bp->msix_table[i + offset].vector);
761 synchronize_irq(bp->pdev->irq);
763 /* make sure sp_task is not running */
764 cancel_delayed_work(&bp->sp_task);
765 flush_workqueue(bnx2x_wq);
771 * General service functions
774 /* Return true if succeeded to acquire the lock */
/*
 * bnx2x_trylock_hw_lock() - single non-blocking attempt to take a HW
 * resource lock.
 * Validates @resource, picks the per-function lock-control register
 * (DRIVER_CONTROL_1 for funcs 0-5, DRIVER_CONTROL_7 for 6-7), writes
 * the resource bit to the "set" register (reg + 4) and reads back to
 * see whether this function won the bit.
 * NOTE(review): 'return false'/'return true' lines and the func<=5
 * branch condition are elided in this listing — code kept verbatim.
 */
775 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
778 u32 resource_bit = (1 << resource);
779 int func = BP_FUNC(bp);
780 u32 hw_lock_control_reg;
782 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
784 /* Validating that the resource is within range */
785 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
787 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
788 resource, HW_LOCK_MAX_RESOURCE_VALUE);
793 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
795 hw_lock_control_reg =
796 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
798 /* Try to acquire the lock */
799 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
800 lock_status = REG_RD(bp, hw_lock_control_reg);
801 if (lock_status & resource_bit)
804 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
810 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
/*
 * bnx2x_sp_event() - handle a slow-path (ramrod) completion CQE.
 * Decodes the CID and command from the ramrod CQE, then advances either
 * the per-fastpath state machine (client setup / halt, keyed on
 * command | fp->state) or the global driver state machine (port setup,
 * halt, CFC delete, set-MAC, keyed on command | bp->state).
 * NOTE(review): 'break' statements, '#ifdef BCM_CNIC' guards and some
 * closing braces are elided in this listing — code kept verbatim.
 */
813 void bnx2x_sp_event(struct bnx2x_fastpath *fp,
814 union eth_rx_cqe *rr_cqe)
816 struct bnx2x *bp = fp->bp;
817 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
818 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
821 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
822 fp->index, cid, command, bp->state,
823 rr_cqe->ramrod_cqe.ramrod_type);
/* per-queue (multi) ramrods: drive the fastpath state machine */
828 switch (command | fp->state) {
829 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
830 BNX2X_FP_STATE_OPENING):
831 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
833 fp->state = BNX2X_FP_STATE_OPEN;
836 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
837 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
839 fp->state = BNX2X_FP_STATE_HALTED;
843 BNX2X_ERR("unexpected MC reply (%d) "
844 "fp[%d] state is %x\n",
845 command, fp->index, fp->state);
848 mb(); /* force bnx2x_wait_ramrod() to see the change */
/* global ramrods: drive the driver-wide state machine */
852 switch (command | bp->state) {
853 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
854 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
855 bp->state = BNX2X_STATE_OPEN;
858 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
859 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
860 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
861 fp->state = BNX2X_FP_STATE_HALTED;
864 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
865 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
866 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
870 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
871 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
/* CNIC (iSCSI/FCoE) connection teardown completion */
872 bnx2x_cnic_cfc_comp(bp, cid);
876 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
877 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
878 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
879 bp->set_mac_pending--;
883 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
884 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
885 bp->set_mac_pending--;
890 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
894 mb(); /* force bnx2x_wait_ramrod() to see the change */
/*
 * bnx2x_interrupt() - legacy INTx / MSI interrupt handler.
 * Acks the interrupt to get the status bitmap, ignores it if shared and
 * not ours (status == 0) or if interrupts are soft-disabled via
 * bp->intr_sem.  For each fastpath whose bit (0x2 << sb_id) is set it
 * prefetches SB/consumer data and schedules NAPI; bit 0x1 means a
 * slow-path event and queues sp_task.  A CNIC status-block bit routes
 * to the registered cnic_handler under RCU.
 * NOTE(review): IRQ_NONE/IRQ_HANDLED returns, 'status &= ~mask' updates
 * and #ifdef BCM_CNIC guards are elided in this listing.
 */
897 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
899 struct bnx2x *bp = netdev_priv(dev_instance);
900 u16 status = bnx2x_ack_int(bp);
904 /* Return here if interrupt is shared and it's not for us */
905 if (unlikely(status == 0)) {
906 DP(NETIF_MSG_INTR, "not our interrupt!\n");
909 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
911 /* Return here if interrupt is disabled */
912 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
913 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
917 #ifdef BNX2X_STOP_ON_ERROR
918 if (unlikely(bp->panic))
922 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
923 struct bnx2x_fastpath *fp = &bp->fp[i];
925 mask = 0x2 << fp->sb_id;
927 /* Handle Rx and Tx according to SB id */
928 prefetch(fp->rx_cons_sb);
929 prefetch(&fp->status_blk->u_status_block.
931 prefetch(fp->tx_cons_sb);
932 prefetch(&fp->status_blk->c_status_block.
934 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
/* CNIC status block */
940 mask = 0x2 << CNIC_SB_ID(bp);
941 if (status & (mask | 0x1)) {
942 struct cnic_ops *c_ops = NULL;
945 c_ops = rcu_dereference(bp->cnic_ops);
947 c_ops->cnic_handler(bp->cnic_data, NULL);
/* slow-path bit: defer to the sp_task workqueue */
954 if (unlikely(status & 0x1)) {
955 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
962 if (unlikely(status))
963 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
975 * General service functions
/*
 * bnx2x_acquire_hw_lock() - blocking acquisition of a HW resource lock.
 * Same register scheme as bnx2x_trylock_hw_lock(), but first checks the
 * lock is not already held, then retries the set-and-readback up to
 * 1000 times ("5 second every 5ms" per the comment) before timing out.
 * NOTE(review): return statements, the msleep between retries and the
 * func<=5 branch condition are elided in this listing.
 */
978 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
981 u32 resource_bit = (1 << resource);
982 int func = BP_FUNC(bp);
983 u32 hw_lock_control_reg;
986 /* Validating that the resource is within range */
987 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
989 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
990 resource, HW_LOCK_MAX_RESOURCE_VALUE);
995 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
997 hw_lock_control_reg =
998 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1001 /* Validating that the resource is not already taken */
1002 lock_status = REG_RD(bp, hw_lock_control_reg);
1003 if (lock_status & resource_bit) {
1004 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1005 lock_status, resource_bit);
1009 /* Try for 5 second every 5ms */
1010 for (cnt = 0; cnt < 1000; cnt++) {
1011 /* Try to acquire the lock */
1012 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1013 lock_status = REG_RD(bp, hw_lock_control_reg);
1014 if (lock_status & resource_bit)
1019 DP(NETIF_MSG_HW, "Timeout\n");
/*
 * bnx2x_release_hw_lock() - release a previously acquired HW resource
 * lock by writing the resource bit to the "clear" (base) register.
 * Verifies the resource is in range and currently held before clearing.
 * NOTE(review): return statements and the func<=5 branch condition are
 * elided in this listing — code kept verbatim.
 */
1023 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1026 u32 resource_bit = (1 << resource);
1027 int func = BP_FUNC(bp);
1028 u32 hw_lock_control_reg;
1030 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1032 /* Validating that the resource is within range */
1033 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1035 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1036 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1041 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1043 hw_lock_control_reg =
1044 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1047 /* Validating that the resource is currently taken */
1048 lock_status = REG_RD(bp, hw_lock_control_reg);
1049 if (!(lock_status & resource_bit)) {
1050 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1051 lock_status, resource_bit);
1055 REG_WR(bp, hw_lock_control_reg, resource_bit);
/*
 * bnx2x_get_gpio() - read the current value of one GPIO pin.
 * The pin index is shifted by MISC_REGISTERS_GPIO_PORT_SHIFT when the
 * NIG port-swap straps are active (swapped ^ port).  Returns the pin
 * value derived from MISC_REG_GPIO.
 * NOTE(review): the 'value' assignments and return are elided in this
 * listing — code kept verbatim.
 */
1060 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1062 /* The GPIO should be swapped if swap register is set and active */
1063 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1064 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1065 int gpio_shift = gpio_num +
1066 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1067 u32 gpio_mask = (1 << gpio_shift);
1071 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1072 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1076 /* read GPIO value */
1077 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1079 /* get the requested pin value */
1080 if ((gpio_reg & gpio_mask) == gpio_mask)
1085 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
/*
 * bnx2x_set_gpio() - drive one GPIO pin low, high, or float it (input).
 * Applies the same port-swap shift as bnx2x_get_gpio(), serializes all
 * GPIO writes behind HW_LOCK_RESOURCE_GPIO, and does a read-modify-
 * write of MISC_REG_GPIO using the FLOAT/CLR/SET bit groups.
 * NOTE(review): the switch statement header, default case and return
 * are elided in this listing — code kept verbatim.
 */
1090 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1092 /* The GPIO should be swapped if swap register is set and active */
1093 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1094 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1095 int gpio_shift = gpio_num +
1096 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1097 u32 gpio_mask = (1 << gpio_shift);
1100 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1101 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1105 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1106 /* read GPIO and mask except the float bits */
1107 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1110 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1111 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1112 gpio_num, gpio_shift);
1113 /* clear FLOAT and set CLR */
1114 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1115 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1118 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1119 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1120 gpio_num, gpio_shift);
1121 /* clear FLOAT and set SET */
1122 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1123 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1126 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1127 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1128 gpio_num, gpio_shift);
/* set FLOAT (tristate the pin) */
1130 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1137 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1138 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
/*
 * bnx2x_set_gpio_int() - set or clear the interrupt-output state of a
 * GPIO pin via MISC_REG_GPIO_INT, using the INT_SET/INT_CLR bit groups.
 * Port-swap handling and GPIO-lock serialization match bnx2x_set_gpio().
 * NOTE(review): the switch header, default case and return are elided
 * in this listing — code kept verbatim.
 */
1143 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1145 /* The GPIO should be swapped if swap register is set and active */
1146 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1147 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1148 int gpio_shift = gpio_num +
1149 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1150 u32 gpio_mask = (1 << gpio_shift);
1153 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1154 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1158 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1160 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1163 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1164 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1165 "output low\n", gpio_num, gpio_shift);
1166 /* clear SET and set CLR */
1167 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1168 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1171 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1172 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1173 "output high\n", gpio_num, gpio_shift);
1174 /* clear CLR and set SET */
1175 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1176 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1183 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1184 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
/*
 * bnx2x_set_spio() - drive one shared (S)PIO pin low, high, or float
 * it, mirroring bnx2x_set_gpio() but on MISC_REG_SPIO.  Only pins
 * SPIO_4..SPIO_7 are valid; writes are serialized behind
 * HW_LOCK_RESOURCE_SPIO.
 * NOTE(review): the switch header, default case and return are elided
 * in this listing — code kept verbatim.
 */
1189 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1191 u32 spio_mask = (1 << spio_num);
1194 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1195 (spio_num > MISC_REGISTERS_SPIO_7)) {
1196 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1200 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1201 /* read SPIO and mask except the float bits */
1202 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1205 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1206 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1207 /* clear FLOAT and set CLR */
1208 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1209 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1212 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1213 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1214 /* clear FLOAT and set SET */
1215 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1216 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1219 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1220 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
/* set FLOAT (tristate the pin) */
1222 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1229 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1230 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
/*
 * bnx2x_calc_fc_adv() - translate the negotiated IEEE flow-control
 * advertisement bits (link_vars.ieee_fc) into the ethtool-style
 * ADVERTISED_Pause/ADVERTISED_Asym_Pause bits in bp->port.advertising.
 * NOTE(review): 'break's, the ADVERTISED_Pause lines paired with each
 * Asym line, and the default-case close are elided in this listing.
 */
1235 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1237 switch (bp->link_vars.ieee_fc &
1238 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1239 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1240 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1244 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1245 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1249 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1250 bp->port.advertising |= ADVERTISED_Asym_Pause;
1254 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
/*
 * bnx2x_initial_phy_init() - bring up the link for the first time.
 * Requires bootcode (MCP); chooses TX-only flow control for jumbo MTUs
 * (>5000) for performance, runs bnx2x_phy_init() under the PHY lock
 * (loopback mode for LOAD_DIAG), recomputes the FC advertisement, and
 * on slow (emulation/FPGA) chips with link up pushes a LINK_UP stats
 * event and link report.
 * NOTE(review): the 'rc' declaration, returns and the else-branch
 * structure are elided in this listing — code kept verbatim.
 */
1261 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1263 if (!BP_NOMCP(bp)) {
1266 /* Initialize link parameters structure variables */
1267 /* It is recommended to turn off RX FC for jumbo frames
1268 for better performance */
1269 if (bp->dev->mtu > 5000)
1270 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1272 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1274 bnx2x_acquire_phy_lock(bp);
1276 if (load_mode == LOAD_DIAG)
1277 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
1279 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1281 bnx2x_release_phy_lock(bp);
1283 bnx2x_calc_fc_adv(bp);
1285 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1286 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1287 bnx2x_link_report(bp);
1292 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
/* (Re)program the PHY with the current link parameters and refresh the
 * flow-control advertisement. No-op (error log only) without bootcode.
 */
1296 void bnx2x_link_set(struct bnx2x *bp)
1298 if (!BP_NOMCP(bp)) {
1299 bnx2x_acquire_phy_lock(bp);
1300 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1301 bnx2x_release_phy_lock(bp);
1303 bnx2x_calc_fc_adv(bp);
1305 BNX2X_ERR("Bootcode is missing - can not set link\n");
/* Bring the link down via the common link code (final argument 1 is
 * passed through to bnx2x_link_reset). Error log only without bootcode.
 */
1308 static void bnx2x__link_reset(struct bnx2x *bp)
1310 if (!BP_NOMCP(bp)) {
1311 bnx2x_acquire_phy_lock(bp);
1312 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1313 bnx2x_release_phy_lock(bp);
1315 BNX2X_ERR("Bootcode is missing - can not reset link\n");
/* Self-test hook: run the common link test under the PHY lock and
 * return its rc (return statement in elided lines).
 */
1318 u8 bnx2x_link_test(struct bnx2x *bp)
1322 if (!BP_NOMCP(bp)) {
1323 bnx2x_acquire_phy_lock(bp);
1324 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1325 bnx2x_release_phy_lock(bp);
1327 BNX2X_ERR("Bootcode is missing - can not test link\n");
/* Initialize the per-port rate-shaping and fairness contexts (bp->cmng)
 * from the current line speed. r_param is bytes-per-usec at line rate
 * (Mbps / 8). Must only run when link_vars.line_speed is non-zero,
 * otherwise the divisions below would fault — presumably guaranteed by
 * callers that check link_up first; TODO confirm.
 */
1332 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1334 u32 r_param = bp->link_vars.line_speed / 8;
1335 u32 fair_periodic_timeout_usec;
1338 memset(&(bp->cmng.rs_vars), 0,
1339 sizeof(struct rate_shaping_vars_per_port));
1340 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1342 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1343 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1345 /* this is the threshold below which no timer arming will occur
1346 1.25 coefficient is for the threshold to be a little bigger
1347 than the real time, to compensate for timer in-accuracy */
1348 bp->cmng.rs_vars.rs_threshold =
1349 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1351 /* resolution of fairness timer */
1352 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1353 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1354 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1356 /* this is the threshold below which we won't arm the timer anymore */
1357 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1359 /* we multiply by 1e3/8 to get bytes/msec.
1360 We don't want the credits to pass a credit
1361 of the t_fair*FAIR_MEM (algorithm resolution) */
1362 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1363 /* since each tick is 4 usec */
1364 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1367 /* Calculates the sum of vn_min_rates.
1368 It's needed for further normalizing of the min_rates.
1370 sum of vn_min_rates.
1372 0 - if all the min_rates are 0.
1373 In the latter case the fairness algorithm should be deactivated.
1374 If not all min_rates are zero then those that are zeroes will be set to 1.
/* Sum the configured per-VN minimum rates for this port into
 * bp->vn_weight_sum and enable/disable the per-port fairness flag
 * accordingly (disabled when every min rate is zero).
 */
1376 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1379 int port = BP_PORT(bp);
1382 bp->vn_weight_sum = 0;
1383 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
/* functions are interleaved across the two ports: func = 2*vn + port */
1384 int func = 2*vn + port;
1385 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
/* min BW is configured in units of 100 Mbps */
1386 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1387 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1389 /* Skip hidden vns */
1390 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1393 /* If min rate is zero - set it to 1 */
1395 vn_min_rate = DEF_MIN_RATE;
1399 bp->vn_weight_sum += vn_min_rate;
1402 /* ... only if all min rates are zeros - disable fairness */
1404 bp->cmng.flags.cmng_enables &=
1405 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1406 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1407 " fairness will be disabled\n");
1409 bp->cmng.flags.cmng_enables |=
1410 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
/* Program the per-VN rate-shaping and fairness contexts for @func into
 * XSTORM internal memory, word by word. Hidden functions get zeroed
 * min/max (handled in elided lines after the FUNC_HIDE check).
 */
1413 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1415 struct rate_shaping_vars_per_vn m_rs_vn;
1416 struct fairness_vars_per_vn m_fair_vn;
1417 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1418 u16 vn_min_rate, vn_max_rate;
1421 /* If function is hidden - set min and max to zeroes */
1422 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
/* min/max BW are configured in units of 100 Mbps */
1427 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1428 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1429 /* If min rate is zero - set it to 1 */
1431 vn_min_rate = DEF_MIN_RATE;
1432 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1433 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1436 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1437 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1439 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1440 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1442 /* global vn counter - maximal Mbps for this vn */
1443 m_rs_vn.vn_counter.rate = vn_max_rate;
1445 /* quota - number of bytes transmitted in this period */
1446 m_rs_vn.vn_counter.quota =
1447 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1449 if (bp->vn_weight_sum) {
1450 /* credit for each period of the fairness algorithm:
1451 number of bytes in T_FAIR (the vn share the port rate).
1452 vn_weight_sum should not be larger than 10000, thus
1453 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1455 m_fair_vn.vn_credit_delta =
1456 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1457 (8 * bp->vn_weight_sum))),
1458 (bp->cmng.fair_vars.fair_threshold * 2));
1459 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1460 m_fair_vn.vn_credit_delta);
1463 /* Store it to internal memory */
1464 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1465 REG_WR(bp, BAR_XSTRORM_INTMEM +
1466 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1467 ((u32 *)(&m_rs_vn))[i]);
1469 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1470 REG_WR(bp, BAR_XSTRORM_INTMEM +
1471 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1472 ((u32 *)(&m_fair_vn))[i]);
1476 /* This function is called upon link interrupt */
/* Re-reads the link state, updates statistics/pause settings, notifies
 * sibling functions on the same E1H port, and re-initializes the
 * per-port min/max (cmng) contexts while link is up.
 */
1477 static void bnx2x_link_attn(struct bnx2x *bp)
1479 u32 prev_link_status = bp->link_vars.link_status;
1480 /* Make sure that we are synced with the current statistics */
1481 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1483 bnx2x_link_update(&bp->link_params, &bp->link_vars);
1485 if (bp->link_vars.link_up) {
1487 /* dropless flow control */
1488 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1489 int port = BP_PORT(bp);
1490 u32 pause_enabled = 0;
1492 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
/* tell USTORM whether TX pause is active on this port */
1495 REG_WR(bp, BAR_USTRORM_INTMEM +
1496 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1500 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1501 struct host_port_stats *pstats;
1503 pstats = bnx2x_sp(bp, port_stats);
1504 /* reset old bmac stats */
1505 memset(&(pstats->mac_stx[0]), 0,
1506 sizeof(struct mac_stx));
1508 if (bp->state == BNX2X_STATE_OPEN)
1509 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1512 /* indicate link status only if link status actually changed */
1513 if (prev_link_status != bp->link_vars.link_status)
1514 bnx2x_link_report(bp);
1517 int port = BP_PORT(bp);
1521 /* Set the attention towards other drivers on the same port */
1522 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1523 if (vn == BP_E1HVN(bp))
1526 func = ((vn << 1) | port);
1527 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1528 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1531 if (bp->link_vars.link_up) {
1534 /* Init rate shaping and fairness contexts */
1535 bnx2x_init_port_minmax(bp);
1537 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1538 bnx2x_init_vn_minmax(bp, 2*vn + port);
1540 /* Store it to internal memory */
1542 i < sizeof(struct cmng_struct_per_port) / 4; i++)
1543 REG_WR(bp, BAR_XSTRORM_INTMEM +
1544 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1545 ((u32 *)(&bp->cmng))[i]);
/* Refresh cached link status and report it; skipped when the device is
 * not open or the function is disabled via multi-function config.
 */
1550 void bnx2x__link_status_update(struct bnx2x *bp)
1552 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
1555 bnx2x_link_status_update(&bp->link_params, &bp->link_vars)
1557 if (bp->link_vars.link_up)
1558 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1560 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1562 bnx2x_calc_vn_weight_sum(bp);
1564 /* indicate link status */
1565 bnx2x_link_report(bp);
/* Called when this function becomes the Port Management Function (PMF):
 * enable NIG attention for our VN in the HC edge registers and kick the
 * statistics state machine.
 */
1568 static void bnx2x_pmf_update(struct bnx2x *bp)
1570 int port = BP_PORT(bp);
1574 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1576 /* enable nig attention */
1577 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1578 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1579 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1581 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1589 * General service functions
1592 /* send the MCP a request, block until there is a reply */
/* Serialized by fw_mb_mutex; polls the FW mailbox for a matching
 * sequence number (up to ~5 s). Returns the FW response code masked
 * with FW_MSG_CODE_MASK, or 0 on timeout (rc zeroing is in elided
 * lines — TODO confirm against full source).
 */
1593 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
1595 int func = BP_FUNC(bp);
1596 u32 seq = ++bp->fw_seq;
1599 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1601 mutex_lock(&bp->fw_mb_mutex);
1602 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
1603 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1606 /* let the FW do it's magic ... */
1609 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
1611 /* Give the FW up to 5 seconds (500*10ms) */
1612 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
1614 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
1615 cnt*delay, rc, seq);
1617 /* is this a reply to our command? */
1618 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
1619 rc &= FW_MSG_CODE_MASK;
1622 BNX2X_ERR("FW failed to respond!\n");
1626 mutex_unlock(&bp->fw_mb_mutex);
/* Quiesce this E1H function: stop the TX queues, disable its LLH port
 * classification in the NIG, and drop the carrier.
 */
1631 static void bnx2x_e1h_disable(struct bnx2x *bp)
1633 int port = BP_PORT(bp);
1635 netif_tx_disable(bp->dev);
1637 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1639 netif_carrier_off(bp->dev);
/* Re-enable a previously disabled E1H function: restore LLH port
 * classification and wake the TX queues.
 */
1642 static void bnx2x_e1h_enable(struct bnx2x *bp)
1644 int port = BP_PORT(bp);
1646 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
1648 /* Tx queue should be only reenabled */
1649 netif_tx_wake_all_queues(bp->dev);
1652 * Should not call netif_carrier_on since it will be called if the link
1653 * is up when checking for link state
/* Recompute rate-shaping/fairness contexts after a bandwidth-allocation
 * change (DCC event), notify sibling functions on the port, and write
 * the updated cmng struct to XSTORM internal memory.
 */
1657 static void bnx2x_update_min_max(struct bnx2x *bp)
1659 int port = BP_PORT(bp);
1662 /* Init rate shaping and fairness contexts */
1663 bnx2x_init_port_minmax(bp);
1665 bnx2x_calc_vn_weight_sum(bp);
1667 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1668 bnx2x_init_vn_minmax(bp, 2*vn + port);
1673 /* Set the attention towards other drivers on the same port */
1674 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1675 if (vn == BP_E1HVN(bp))
1678 func = ((vn << 1) | port);
1679 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1680 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1683 /* Store it to internal memory */
1684 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1685 REG_WR(bp, BAR_XSTRORM_INTMEM +
1686 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1687 ((u32 *)(&bp->cmng))[i]);
/* Handle Dynamic Control Command events from the MCP: enable/disable
 * this PF and/or refresh bandwidth allocation, then ack the MCP with
 * DCC_OK or DCC_FAILURE (failure path condition is in elided lines).
 */
1691 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1693 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
1695 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
1698 * This is the only place besides the function initialization
1699 * where the bp->flags can change so it is done without any
1702 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1703 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
1704 bp->flags |= MF_FUNC_DIS;
1706 bnx2x_e1h_disable(bp);
1708 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
1709 bp->flags &= ~MF_FUNC_DIS;
1711 bnx2x_e1h_enable(bp);
/* each handled event bit is cleared so leftovers signal failure */
1713 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1715 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1717 bnx2x_update_min_max(bp);
1718 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1721 /* Report results to MCP */
1723 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
1725 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
1728 /* must be called under the spq lock */
/* Return the current slow-path queue producer BD and advance the
 * producer, wrapping back to the start of the ring at spq_last_bd.
 */
1729 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1731 struct eth_spe *next_spe = bp->spq_prod_bd;
1733 if (bp->spq_prod_bd == bp->spq_last_bd) {
1734 bp->spq_prod_bd = bp->spq;
1735 bp->spq_prod_idx = 0;
1736 DP(NETIF_MSG_TIMER, "end of spq\n");
1744 /* must be called under the spq lock */
/* Publish the new SPQ producer index to XSTORM internal memory. */
1745 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1747 int func = BP_FUNC(bp);
1749 /* Make sure that BD data is updated before writing the producer */
1752 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1757 /* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post one slow-path element (ramrod). Fails if the SPQ is full;
 * @common marks a common (non per-connection) ramrod. Return value
 * lies in elided lines.
 */
1758 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1759 u32 data_hi, u32 data_lo, int common)
1761 struct eth_spe *spe;
1763 #ifdef BNX2X_STOP_ON_ERROR
1764 if (unlikely(bp->panic))
1768 spin_lock_bh(&bp->spq_lock);
1770 if (!bp->spq_left) {
1771 BNX2X_ERR("BUG! SPQ ring full!\n");
1772 spin_unlock_bh(&bp->spq_lock);
1777 spe = bnx2x_sp_get_next(bp);
1779 /* CID needs port number to be encoded in it */
1780 spe->hdr.conn_and_cmd_data =
1781 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1783 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
1786 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1788 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1789 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1793 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1794 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
1795 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1796 (u32)(U64_LO(bp->spq_mapping) +
1797 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1798 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1800 bnx2x_sp_prod_update(bp);
1801 spin_unlock_bh(&bp->spq_lock);
1805 /* acquire split MCP access lock register */
/* Spin (up to 1000 iterations) on the MCP GRC lock at 0x9c; bit 31 set
 * after write-back means the lock was granted. Error return value is
 * in elided lines.
 */
1806 static int bnx2x_acquire_alr(struct bnx2x *bp)
1812 for (j = 0; j < 1000; j++) {
1814 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1815 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1816 if (val & (1L << 31))
1821 if (!(val & (1L << 31))) {
1822 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1829 /* release split MCP access lock register */
/* Clearing the register releases the lock taken by bnx2x_acquire_alr(). */
1830 static void bnx2x_release_alr(struct bnx2x *bp)
1832 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
/* Snapshot the default status block indices written by the chip and
 * record which sub-blocks (attention, C/U/X/T storms) advanced.
 * The returned bitmask accumulation is in elided lines.
 */
1835 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1837 struct host_def_status_block *def_sb = bp->def_status_blk;
1840 barrier(); /* status block is written to by the chip */
1841 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1842 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1845 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1846 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1849 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1850 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1853 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1854 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1857 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1858 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1865 * slow path service functions
/* Handle newly asserted attention bits: mask them in the AEU, record
 * them in bp->attn_state, service hard-wired sources (NIG/link, SW
 * timers, GPIOs, general attentions), then write the set to the HC
 * ATTN_BITS_SET register and restore the NIG mask.
 */
1868 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1870 int port = BP_PORT(bp);
1871 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1872 COMMAND_REG_ATTN_BITS_SET);
1873 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1874 MISC_REG_AEU_MASK_ATTN_FUNC_0;
1875 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1876 NIG_REG_MASK_INTERRUPT_PORT0;
/* an already-asserted bit asserting again indicates an IGU problem */
1880 if (bp->attn_state & asserted)
1881 BNX2X_ERR("IGU ERROR\n");
1883 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1884 aeu_mask = REG_RD(bp, aeu_addr);
1886 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
1887 aeu_mask, asserted);
1888 aeu_mask &= ~(asserted & 0x3ff);
1889 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
1891 REG_WR(bp, aeu_addr, aeu_mask);
1892 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1894 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
1895 bp->attn_state |= asserted;
1896 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
1898 if (asserted & ATTN_HARD_WIRED_MASK) {
1899 if (asserted & ATTN_NIG_FOR_FUNC) {
1901 bnx2x_acquire_phy_lock(bp);
1903 /* save nig interrupt mask */
1904 nig_mask = REG_RD(bp, nig_int_mask_addr);
1905 REG_WR(bp, nig_int_mask_addr, 0);
1907 bnx2x_link_attn(bp);
1909 /* handle unicore attn? */
1911 if (asserted & ATTN_SW_TIMER_4_FUNC)
1912 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1914 if (asserted & GPIO_2_FUNC)
1915 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1917 if (asserted & GPIO_3_FUNC)
1918 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1920 if (asserted & GPIO_4_FUNC)
1921 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
/* general attentions are cleared by writing 0 to their AEU register */
1924 if (asserted & ATTN_GENERAL_ATTN_1) {
1925 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1926 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1928 if (asserted & ATTN_GENERAL_ATTN_2) {
1929 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1930 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1932 if (asserted & ATTN_GENERAL_ATTN_3) {
1933 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1934 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1937 if (asserted & ATTN_GENERAL_ATTN_4) {
1938 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1939 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1941 if (asserted & ATTN_GENERAL_ATTN_5) {
1942 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1943 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1945 if (asserted & ATTN_GENERAL_ATTN_6) {
1946 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1947 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1951 } /* if hardwired */
1953 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1955 REG_WR(bp, hc_addr, asserted);
1957 /* now set back the mask */
1958 if (asserted & ATTN_NIG_FOR_FUNC) {
1959 REG_WR(bp, nig_int_mask_addr, nig_mask);
1960 bnx2x_release_phy_lock(bp);
/* Record a fan failure in the shared-memory PHY config (so other agents
 * see the failed state) and log a shutdown warning to the user.
 */
1964 static inline void bnx2x_fan_failure(struct bnx2x *bp)
1966 int port = BP_PORT(bp);
1968 /* mark the failure */
1969 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
1970 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
1971 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
1972 bp->link_params.ext_phy_config);
1974 /* log the failure */
1975 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
1976 " the driver to shutdown the card to prevent permanent"
1977 " damage. Please contact OEM Support for assistance\n");
/* Service deasserted attentions from AEU group 0: SPIO5 fan failure
 * (with per-PHY power-down sequencing), GPIO3 module-detect, and fatal
 * HW block attentions in set 0.
 */
1980 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1982 int port = BP_PORT(bp);
1984 u32 val, swap_val, swap_override;
1986 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1987 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
1989 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
/* disable further SPIO5 attentions for this function */
1991 val = REG_RD(bp, reg_offset);
1992 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
1993 REG_WR(bp, reg_offset, val);
1995 BNX2X_ERR("SPIO5 hw attention\n");
1997 /* Fan failure attention */
1998 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
1999 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2000 /* Low power mode is controlled by GPIO 2 */
2001 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2002 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2003 /* The PHY reset is controlled by GPIO 1 */
2004 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2005 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2008 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2009 /* The PHY reset is controlled by GPIO 1 */
2010 /* fake the port number to cancel the swap done in
2012 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2013 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2014 port = (swap_val && swap_override) ^ 1;
2015 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2016 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2022 bnx2x_fan_failure(bp);
2025 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2026 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2027 bnx2x_acquire_phy_lock(bp);
2028 bnx2x_handle_module_detect_int(&bp->link_params);
2029 bnx2x_release_phy_lock(bp);
2032 if (attn & HW_INTERRUT_ASSERT_SET_0) {
/* mask the offending bits then report the fatal condition */
2034 val = REG_RD(bp, reg_offset);
2035 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2036 REG_WR(bp, reg_offset, val);
2038 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2039 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
/* Service deasserted attentions from AEU group 1: doorbell queue (DORQ)
 * errors and fatal HW block attentions in set 1.
 */
2044 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2048 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
/* reading the clear register also clears the interrupt status */
2050 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2051 BNX2X_ERR("DB hw attention 0x%x\n", val);
2052 /* DORQ discard attention */
2054 BNX2X_ERR("FATAL error from DORQ\n");
2057 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2059 int port = BP_PORT(bp);
2062 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2063 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2065 val = REG_RD(bp, reg_offset);
2066 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2067 REG_WR(bp, reg_offset, val);
2069 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2070 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
/* Service deasserted attentions from AEU group 2: CFC and PXP errors
 * and fatal HW block attentions in set 2.
 */
2075 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2079 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2081 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2082 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2083 /* CFC error attention */
2085 BNX2X_ERR("FATAL error from CFC\n");
2088 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2090 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2091 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2092 /* RQ_USDMDP_FIFO_OVERFLOW */
2094 BNX2X_ERR("FATAL error from PXP\n");
2097 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2099 int port = BP_PORT(bp);
2102 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2103 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2105 val = REG_RD(bp, reg_offset);
2106 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2107 REG_WR(bp, reg_offset, val);
2109 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2110 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
/* Service deasserted attentions from AEU group 3: general attentions
 * (PMF link-sync, DCC events, MC/MCP asserts) and latched attentions
 * (GRC timeout / reserved), clearing latch signals afterwards.
 */
2115 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2119 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2121 if (attn & BNX2X_PMF_LINK_ASSERT) {
2122 int func = BP_FUNC(bp);
/* ack our per-function link-sync general attention */
2124 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2125 bp->mf_config = SHMEM_RD(bp,
2126 mf_cfg.func_mf_config[func].config);
2127 val = SHMEM_RD(bp, func_mb[func].drv_status);
2128 if (val & DRV_STATUS_DCC_EVENT_MASK)
2130 (val & DRV_STATUS_DCC_EVENT_MASK));
2131 bnx2x__link_status_update(bp);
2132 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
2133 bnx2x_pmf_update(bp);
2135 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2137 BNX2X_ERR("MC assert!\n");
2138 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2139 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2140 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2141 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2144 } else if (attn & BNX2X_MCP_ASSERT) {
2146 BNX2X_ERR("MCP assert!\n");
2147 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2151 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2154 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2155 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2156 if (attn & BNX2X_GRC_TIMEOUT) {
/* the GRC timeout detail register only exists on E1H */
2157 val = CHIP_IS_E1H(bp) ?
2158 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2159 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2161 if (attn & BNX2X_GRC_RSV) {
2162 val = CHIP_IS_E1H(bp) ?
2163 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2164 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2166 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
/* Recovery bookkeeping kept in a MISC generic-purpose register shared
 * across PCI functions: low 16 bits hold a load counter, the remaining
 * bits form the reset-in-progress flag.
 */
2170 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
2171 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
2172 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2173 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
2174 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
2175 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
2177 * should be run under rtnl lock
/* Clear the reset-in-progress flag bit, preserving the load counter. */
2179 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2181 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2182 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2183 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2189 * should be run under rtnl lock
/* Mark a recovery reset as in progress (flag set in elided line). */
2191 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2193 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2195 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2201 * should be run under rtnl lock
/* true when no recovery reset is in progress (flag bits all clear). */
2203 bool bnx2x_reset_is_done(struct bnx2x *bp)
2205 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2206 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2207 return (val & RESET_DONE_FLAG_MASK) ? false : true;
2211 * should be run under rtnl lock
/* Increment the shared load counter (wraps within LOAD_COUNTER_MASK),
 * leaving the reset flag bits untouched.
 */
2213 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2215 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2217 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2219 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2220 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2226 * should be run under rtnl lock
/* Decrement the shared load counter; the new value is returned from an
 * elided line. Reset flag bits are preserved.
 */
2228 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2230 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2232 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2234 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2235 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2243 * should be run under rtnl lock
/* Read the current shared load counter value. */
2245 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2247 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
/* Zero the shared load counter, preserving the reset flag bits. */
2250 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2252 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2253 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
/* Helper for the parity dumps below: prints one block name (prefixed
 * by a separator after the first entry) in the "Parity errors detected
 * in blocks:" list. Body lies in elided lines.
 */
2256 static inline void _print_next_block(int idx, const char *blk)
/* Walk the set bits of @sig (AEU parity group 0) and print the name of
 * each HW block that latched a parity error; returns the updated
 * running count @par_num (sig bit-clearing and return are elided).
 */
2263 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2267 for (i = 0; sig; i++) {
2268 cur_bit = ((u32)0x1 << i);
2269 if (sig & cur_bit) {
2271 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2272 _print_next_block(par_num++, "BRB");
2274 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2275 _print_next_block(par_num++, "PARSER");
2277 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2278 _print_next_block(par_num++, "TSDM");
2280 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2281 _print_next_block(par_num++, "SEARCHER");
2283 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2284 _print_next_block(par_num++, "TSEMI");
/* As bnx2x_print_blocks_with_parity0, but for AEU parity group 1. */
2296 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2300 for (i = 0; sig; i++) {
2301 cur_bit = ((u32)0x1 << i);
2302 if (sig & cur_bit) {
2304 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2305 _print_next_block(par_num++, "PBCLIENT");
2307 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2308 _print_next_block(par_num++, "QM");
2310 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2311 _print_next_block(par_num++, "XSDM");
2313 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2314 _print_next_block(par_num++, "XSEMI");
2316 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2317 _print_next_block(par_num++, "DOORBELLQ");
2319 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2320 _print_next_block(par_num++, "VAUX PCI CORE");
2322 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2323 _print_next_block(par_num++, "DEBUG");
2325 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2326 _print_next_block(par_num++, "USDM");
2328 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2329 _print_next_block(par_num++, "USEMI");
2331 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2332 _print_next_block(par_num++, "UPB");
2334 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2335 _print_next_block(par_num++, "CSDM");
/* As bnx2x_print_blocks_with_parity0, but for AEU parity group 2. */
2347 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2351 for (i = 0; sig; i++) {
2352 cur_bit = ((u32)0x1 << i);
2353 if (sig & cur_bit) {
2355 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2356 _print_next_block(par_num++, "CSEMI");
2358 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2359 _print_next_block(par_num++, "PXP");
2361 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2362 _print_next_block(par_num++,
2363 "PXPPCICLOCKCLIENT");
2365 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2366 _print_next_block(par_num++, "CFC");
2368 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2369 _print_next_block(par_num++, "CDU");
2371 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2372 _print_next_block(par_num++, "IGU");
2374 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2375 _print_next_block(par_num++, "MISC");
/* As bnx2x_print_blocks_with_parity0, but for AEU parity group 3
 * (MCP latched parity sources).
 */
2387 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2391 for (i = 0; sig; i++) {
2392 cur_bit = ((u32)0x1 << i);
2393 if (sig & cur_bit) {
2395 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2396 _print_next_block(par_num++, "MCP ROM");
2398 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2399 _print_next_block(par_num++, "MCP UMP RX");
2401 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2402 _print_next_block(par_num++, "MCP UMP TX");
2404 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2405 _print_next_block(par_num++, "MCP SCPAD");
/* Check the four after-invert signal words for parity-error bits; if
 * any are set, dump the affected block names and report a parity error
 * (return statements are in elided lines).
 */
2417 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2420 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2421 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2423 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
2424 "[0]:0x%08x [1]:0x%08x "
2425 "[2]:0x%08x [3]:0x%08x\n",
2426 sig0 & HW_PRTY_ASSERT_SET_0,
2427 sig1 & HW_PRTY_ASSERT_SET_1,
2428 sig2 & HW_PRTY_ASSERT_SET_2,
2429 sig3 & HW_PRTY_ASSERT_SET_3);
2430 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
2432 par_num = bnx2x_print_blocks_with_parity0(
2433 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2434 par_num = bnx2x_print_blocks_with_parity1(
2435 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2436 par_num = bnx2x_print_blocks_with_parity2(
2437 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2438 par_num = bnx2x_print_blocks_with_parity3(
2439 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
/* Read the four after-invert AEU attention words for this port and
 * delegate to bnx2x_parity_attn() to detect/report parity errors.
 */
2446 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2448 struct attn_route attn;
2449 int port = BP_PORT(bp);
2451 attn.sig[0] = REG_RD(bp,
2452 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2454 attn.sig[1] = REG_RD(bp,
2455 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2457 attn.sig[2] = REG_RD(bp,
2458 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2460 attn.sig[3] = REG_RD(bp,
2461 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2464 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
/* Top-level deassertion handler. Takes the split MCP lock (ALR) since
 * the MCP or the other port may race on the same event. On a parity
 * error, starts the recovery task and returns without further
 * attention handling so other functions still observe the error.
 * Otherwise dispatches each deasserted group to the per-group handlers,
 * acks the HC, and unmasks the bits in the AEU.
 */
2468 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2470 struct attn_route attn, *group_mask;
2471 int port = BP_PORT(bp);
2477 /* need to take HW lock because MCP or other port might also
2478 try to handle this event */
2479 bnx2x_acquire_alr(bp);
2481 if (bnx2x_chk_parity_attn(bp)) {
2482 bp->recovery_state = BNX2X_RECOVERY_INIT;
2483 bnx2x_set_reset_in_progress(bp);
2484 schedule_delayed_work(&bp->reset_task, 0);
2485 /* Disable HW interrupts */
2486 bnx2x_int_disable(bp);
2487 bnx2x_release_alr(bp);
2488 /* In case of parity errors don't handle attentions so that
2489 * other function would "see" parity errors.
2494 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2495 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2496 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2497 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2498 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2499 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2501 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2502 if (deasserted & (1 << index)) {
2503 group_mask = &bp->attn_group[index];
2505 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2506 index, group_mask->sig[0], group_mask->sig[1],
2507 group_mask->sig[2], group_mask->sig[3]);
/* group 3 (general/latched attentions) is handled first */
2509 bnx2x_attn_int_deasserted3(bp,
2510 attn.sig[3] & group_mask->sig[3]);
2511 bnx2x_attn_int_deasserted1(bp,
2512 attn.sig[1] & group_mask->sig[1]);
2513 bnx2x_attn_int_deasserted2(bp,
2514 attn.sig[2] & group_mask->sig[2]);
2515 bnx2x_attn_int_deasserted0(bp,
2516 attn.sig[0] & group_mask->sig[0]);
2520 bnx2x_release_alr(bp);
2522 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2525 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2527 REG_WR(bp, reg_addr, val);
/* a bit we never recorded as asserted indicates an IGU problem */
2529 if (~bp->attn_state & deasserted)
2530 BNX2X_ERR("IGU ERROR\n");
2532 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2533 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2535 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2536 aeu_mask = REG_RD(bp, reg_addr);
2538 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2539 aeu_mask, deasserted);
2540 aeu_mask |= (deasserted & 0x3ff);
2541 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2543 REG_WR(bp, reg_addr, aeu_mask);
2544 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2546 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2547 bp->attn_state &= ~deasserted;
2548 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
/* Top-level attention dispatcher: compare the attention bits/ack from the
 * default status block with the driver's cached attn_state to find bits that
 * were newly asserted or newly deasserted, then hand each set to the
 * corresponding handler.
 */
2551 static void bnx2x_attn_int(struct bnx2x *bp)
2553 /* read local copy of bits */
2554 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2556 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2558 u32 attn_state = bp->attn_state;
2560 /* look for changed bits */
2561 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2562 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2565 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2566 attn_bits, attn_ack, asserted, deasserted);
/* A bit may only differ from attn_state while bits and ack also disagree */
2568 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2569 BNX2X_ERR("BAD attention state\n");
2571 /* handle bits that were raised */
2573 bnx2x_attn_int_asserted(bp, asserted);
2576 bnx2x_attn_int_deasserted(bp, deasserted);
/* Slow-path work item: runs off bp->sp_task.  Bails out while interrupts are
 * disabled (intr_sem held), refreshes the default status block indices, logs
 * the event status, and acks all five default-SB segments.
 * NOTE(review): lines handling the attention/CSTORM status bits between 2594
 * and 2612 are missing from this extracted chunk; code left byte-identical.
 */
2579 static void bnx2x_sp_task(struct work_struct *work)
2581 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2584 /* Return here if interrupt is disabled */
2585 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2586 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2590 status = bnx2x_update_dsb_idx(bp);
2591 /* if (status == 0) */
2592 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2594 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
2602 /* CStorm events: STAT_QUERY */
2604 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
2608 if (unlikely(status))
2609 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
/* Ack every storm's default status block index to re-arm the IGU */
2612 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2614 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2616 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2618 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2620 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
/* MSI-X slow-path interrupt handler: disables further IGU interrupts for the
 * default SB, optionally forwards the event to a registered CNIC handler
 * (under RCU), and queues the slow-path work item for process context.
 * Returns an irqreturn_t (return statements are among the lines missing from
 * this extracted chunk).
 */
2624 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2626 struct net_device *dev = dev_instance;
2627 struct bnx2x *bp = netdev_priv(dev);
2629 /* Return here if interrupt is disabled */
2630 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2631 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2635 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2637 #ifdef BNX2X_STOP_ON_ERROR
2638 if (unlikely(bp->panic))
/* CNIC (iSCSI/FCoE offload) gets first crack at the event, if registered */
2644 struct cnic_ops *c_ops;
2647 c_ops = rcu_dereference(bp->cnic_ops);
2649 c_ops->cnic_handler(bp->cnic_data, NULL);
2653 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2658 /* end of slow path */
2662 /****************************************************************************
2664 ****************************************************************************/
2666 /* sum[hi:lo] += add[hi:lo] */
/* 64-bit statistics helper macros.  Hardware/firmware counters are kept as
 * split hi/lo 32-bit pairs; these macros implement add/subtract with manual
 * carry/borrow propagation between the halves, plus UPDATE_* wrappers that
 * fold new raw counter snapshots into the accumulated driver statistics.
 * NOTE(review): the do { } while (0) wrappers and some statement lines are
 * missing from this extracted chunk; code left byte-identical.
 */
2666 /* sum[hi:lo] += add[hi:lo] */
2667 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2670 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2673 /* difference = minuend - subtrahend */
2674 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2676 if (m_lo < s_lo) { \
2678 d_hi = m_hi - s_hi; \
2680 /* we can 'loan' 1 */ \
2682 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2684 /* m_hi <= s_hi */ \
2689 /* m_lo >= s_lo */ \
2690 if (m_hi < s_hi) { \
2694 /* m_hi >= s_hi */ \
2695 d_hi = m_hi - s_hi; \
2696 d_lo = m_lo - s_lo; \
/* diff = new MAC counter - last snapshot; snapshot in mac_stx[0],
 * accumulated total in mac_stx[1] */
2701 #define UPDATE_STAT64(s, t) \
2703 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2704 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2705 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2706 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2707 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2708 pstats->mac_stx[1].t##_lo, diff.lo); \
2711 #define UPDATE_STAT64_NIG(s, t) \
2713 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2714 diff.lo, new->s##_lo, old->s##_lo); \
2715 ADD_64(estats->t##_hi, diff.hi, \
2716 estats->t##_lo, diff.lo); \
2719 /* sum[hi:lo] += add */
2720 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2723 s_hi += (s_lo < a) ? 1 : 0; \
2726 #define UPDATE_EXTEND_STAT(s) \
2728 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2729 pstats->mac_stx[1].s##_lo, \
/* Per-storm client-stat updates: compute the delta since the last snapshot
 * (relying on unsigned wraparound), save the new snapshot, accumulate */
2733 #define UPDATE_EXTEND_TSTAT(s, t) \
2735 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
2736 old_tclient->s = tclient->s; \
2737 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
2740 #define UPDATE_EXTEND_USTAT(s, t) \
2742 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
2743 old_uclient->s = uclient->s; \
2744 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
2747 #define UPDATE_EXTEND_XSTAT(s, t) \
2749 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
2750 old_xclient->s = xclient->s; \
2751 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
2754 /* minuend -= subtrahend */
2755 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
2757 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
2760 /* minuend[hi:lo] -= subtrahend */
2761 #define SUB_EXTEND_64(m_hi, m_lo, s) \
2763 SUB_64(m_hi, 0, m_lo, s); \
2766 #define SUB_EXTEND_USTAT(s, t) \
2768 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
2769 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
2773 * General service functions
/* Collapse a {hi, lo} u32 pair (hi word first in memory) into a single long.
 * On 64-bit builds the full 64-bit value is returned; the 32-bit fallback
 * branch is among the lines missing from this extracted chunk.
 */
2776 static inline long bnx2x_hilo(u32 *hiref)
2778 u32 lo = *(hiref + 1);
2779 #if (BITS_PER_LONG == 64)
2782 return HILO_U64(hi, lo);
2789 * Init service functions
/* Post a STAT_QUERY ramrod asking firmware to collect storm statistics for
 * all client IDs of this function.  Skipped if a previous query is still
 * pending; on a successful post, stats_pending is set until the storms answer.
 */
2792 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2794 if (!bp->stats_pending) {
2795 struct eth_query_ramrod_data ramrod_data = {0};
2798 ramrod_data.drv_counter = bp->stats_counter++;
2799 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
/* Build a bitmask of all client IDs whose stats should be gathered */
2800 for_each_queue(bp, i)
2801 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
2803 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2804 ((u32 *)&ramrod_data)[1],
2805 ((u32 *)&ramrod_data)[0], 0);
2807 /* stats ramrod has it's own slot on the spq */
2809 bp->stats_pending = 1;
/* Kick off the DMAE transfers previously queued in bp->slowpath->dmae[].
 * When a multi-command program exists (executer_idx != 0) a "loader" DMAE
 * command is built that copies the program into the DMAE command memory and
 * chains execution; otherwise a single pre-built stats_dmae command is
 * posted directly.  Completion is signalled via *stats_comp.
 */
2814 static void bnx2x_hw_stats_post(struct bnx2x *bp)
2816 struct dmae_command *dmae = &bp->stats_dmae;
2817 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
2819 *stats_comp = DMAE_COMP_VAL;
2820 if (CHIP_REV_IS_SLOW(bp))
2824 if (bp->executer_idx) {
2825 int loader_idx = PMF_DMAE_C(bp);
2827 memset(dmae, 0, sizeof(struct dmae_command));
/* Loader command: DMA the queued command array into DMAE command memory */
2829 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
2830 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
2831 DMAE_CMD_DST_RESET |
2833 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2835 DMAE_CMD_ENDIANITY_DW_SWAP |
2837 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
2839 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
2840 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
2841 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
2842 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
2843 sizeof(struct dmae_command) *
2844 (loader_idx + 1)) >> 2;
2845 dmae->dst_addr_hi = 0;
2846 dmae->len = sizeof(struct dmae_command) >> 2;
/* Completion of the loader triggers execution of the copied program */
2849 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
2850 dmae->comp_addr_hi = 0;
2854 bnx2x_post_dmae(bp, dmae, loader_idx);
2856 } else if (bp->func_stx) {
2858 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
/* Busy-wait for the DMAE completion word to reach DMAE_COMP_VAL, logging an
 * error on timeout.  The wait/decrement body and return statement are among
 * the lines missing from this extracted chunk.
 */
2862 static int bnx2x_stats_comp(struct bnx2x *bp)
2864 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
2868 while (*stats_comp != DMAE_COMP_VAL) {
2870 BNX2X_ERR("timeout waiting for stats finished\n");
2880 * Statistics service functions
/* On becoming the PMF (port management function) in an E1H multi-function
 * setup, read back the port statistics previously maintained by the old PMF
 * from shared GRC memory via two chained DMAE reads, then wait for
 * completion.  The transfer is split because a single DMAE read is limited
 * to DMAE_LEN32_RD_MAX dwords.
 */
2883 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
2885 struct dmae_command *dmae;
2887 int loader_idx = PMF_DMAE_C(bp);
2888 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
/* Only meaningful for an E1H PMF with a valid port-stats GRC address */
2891 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
2892 BNX2X_ERR("BUG!\n");
2896 bp->executer_idx = 0;
2898 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
2900 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2902 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2904 DMAE_CMD_ENDIANITY_DW_SWAP |
2906 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
2907 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
/* First chunk: up to DMAE_LEN32_RD_MAX dwords, chained to the next command */
2909 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2910 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
2911 dmae->src_addr_lo = bp->port.port_stx >> 2;
2912 dmae->src_addr_hi = 0;
2913 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
2914 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
2915 dmae->len = DMAE_LEN32_RD_MAX;
2916 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2917 dmae->comp_addr_hi = 0;
/* Second chunk: the remainder, completing to the stats_comp word in PCI */
2920 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2921 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
2922 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
2923 dmae->src_addr_hi = 0;
2924 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
2925 DMAE_LEN32_RD_MAX * 4);
2926 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
2927 DMAE_LEN32_RD_MAX * 4);
2928 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
2929 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
2930 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
2931 dmae->comp_val = DMAE_COMP_VAL;
2934 bnx2x_hw_stats_post(bp);
2935 bnx2x_stats_comp(bp);
/* Build the per-port DMAE statistics program in bp->slowpath->dmae[]:
 *  - write host port/function stats out to shared GRC memory,
 *  - read the active MAC's counters (BMAC or EMAC, depending on link),
 *  - read NIG discard/truncate and egress packet counters.
 * Only the final command completes to the host stats_comp word; all earlier
 * commands chain via the dmae_reg_go_c "GO" registers.  The program is later
 * executed by bnx2x_hw_stats_post().
 */
2938 static void bnx2x_port_stats_init(struct bnx2x *bp)
2940 struct dmae_command *dmae;
2941 int port = BP_PORT(bp);
2942 int vn = BP_E1HVN(bp);
2944 int loader_idx = PMF_DMAE_C(bp);
2946 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
/* Should only be called by the PMF while link is up */
2949 if (!bp->link_vars.link_up || !bp->port.pmf) {
2950 BNX2X_ERR("BUG!\n");
2954 bp->executer_idx = 0;
/* Opcode for host->GRC writes (PCI source, GRC destination) */
2957 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
2958 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
2959 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2961 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2963 DMAE_CMD_ENDIANITY_DW_SWAP |
2965 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
2966 (vn << DMAE_CMD_E1HVN_SHIFT));
2968 if (bp->port.port_stx) {
2970 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2971 dmae->opcode = opcode;
2972 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
2973 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
2974 dmae->dst_addr_lo = bp->port.port_stx >> 2;
2975 dmae->dst_addr_hi = 0;
2976 dmae->len = sizeof(struct host_port_stats) >> 2;
2977 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2978 dmae->comp_addr_hi = 0;
/* Mirror the per-function stats to its GRC slot as well */
2984 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2985 dmae->opcode = opcode;
2986 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
2987 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
2988 dmae->dst_addr_lo = bp->func_stx >> 2;
2989 dmae->dst_addr_hi = 0;
2990 dmae->len = sizeof(struct host_func_stats) >> 2;
2991 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2992 dmae->comp_addr_hi = 0;
/* Opcode for GRC->host reads (MAC and NIG counter fetches) */
2997 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
2998 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
2999 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3001 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3003 DMAE_CMD_ENDIANITY_DW_SWAP |
3005 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3006 (vn << DMAE_CMD_E1HVN_SHIFT));
3008 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3010 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3011 NIG_REG_INGRESS_BMAC0_MEM);
3013 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3014 BIGMAC_REGISTER_TX_STAT_GTBYT */
3015 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3016 dmae->opcode = opcode;
3017 dmae->src_addr_lo = (mac_addr +
3018 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3019 dmae->src_addr_hi = 0;
3020 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3021 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3022 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3023 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3024 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3025 dmae->comp_addr_hi = 0;
3028 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3029 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3030 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3031 dmae->opcode = opcode;
3032 dmae->src_addr_lo = (mac_addr +
3033 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3034 dmae->src_addr_hi = 0;
3035 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3036 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3037 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3038 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3039 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3040 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3041 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3042 dmae->comp_addr_hi = 0;
3045 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3047 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3049 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3050 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3051 dmae->opcode = opcode;
3052 dmae->src_addr_lo = (mac_addr +
3053 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3054 dmae->src_addr_hi = 0;
3055 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3056 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3057 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3058 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3059 dmae->comp_addr_hi = 0;
3062 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3063 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3064 dmae->opcode = opcode;
3065 dmae->src_addr_lo = (mac_addr +
3066 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3067 dmae->src_addr_hi = 0;
3068 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3069 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3070 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3071 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3073 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3074 dmae->comp_addr_hi = 0;
3077 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3078 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3079 dmae->opcode = opcode;
3080 dmae->src_addr_lo = (mac_addr +
3081 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3082 dmae->src_addr_hi = 0;
3083 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3084 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3085 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3086 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3087 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3088 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3089 dmae->comp_addr_hi = 0;
/* NIG block: BRB discard/truncate counters (first part of nig_stats) */
3094 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3095 dmae->opcode = opcode;
3096 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3097 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3098 dmae->src_addr_hi = 0;
3099 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3100 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3101 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3102 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3103 dmae->comp_addr_hi = 0;
/* NIG egress MAC packet counter 0 (one 64-bit counter = 2 dwords) */
3106 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3107 dmae->opcode = opcode;
3108 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3109 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3110 dmae->src_addr_hi = 0;
3111 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3112 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3113 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3114 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3115 dmae->len = (2*sizeof(u32)) >> 2;
3116 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3117 dmae->comp_addr_hi = 0;
/* Final command: egress MAC packet counter 1; completes to host stats_comp */
3120 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3121 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3122 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3123 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3125 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3127 DMAE_CMD_ENDIANITY_DW_SWAP |
3129 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3130 (vn << DMAE_CMD_E1HVN_SHIFT));
3131 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3132 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3133 dmae->src_addr_hi = 0;
3134 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3135 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3136 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3137 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3138 dmae->len = (2*sizeof(u32)) >> 2;
3139 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3140 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3141 dmae->comp_val = DMAE_COMP_VAL;
/* Initialize the single-command stats program used by a non-PMF function:
 * one DMAE write of the host_func_stats block out to the function's GRC
 * statistics slot (func_stx), completing to the host stats_comp word.
 */
3146 static void bnx2x_func_stats_init(struct bnx2x *bp)
3148 struct dmae_command *dmae = &bp->stats_dmae;
3149 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3152 if (!bp->func_stx) {
3153 BNX2X_ERR("BUG!\n");
/* No chained program: executer_idx stays 0 so bnx2x_hw_stats_post()
 * posts stats_dmae directly */
3157 bp->executer_idx = 0;
3158 memset(dmae, 0, sizeof(struct dmae_command));
3160 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3161 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3162 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3164 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3166 DMAE_CMD_ENDIANITY_DW_SWAP |
3168 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3169 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3170 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3171 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3172 dmae->dst_addr_lo = bp->func_stx >> 2;
3173 dmae->dst_addr_hi = 0;
3174 dmae->len = sizeof(struct host_func_stats) >> 2;
3175 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3176 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3177 dmae->comp_val = DMAE_COMP_VAL;
/* Start a statistics cycle: (re)build the DMAE program — the port program
 * when PMF (the condition line is missing from this extracted chunk), or the
 * function-only program otherwise — then post both the HW DMAE transfers and
 * the firmware storm-stats query.
 */
3182 static void bnx2x_stats_start(struct bnx2x *bp)
3185 bnx2x_port_stats_init(bp);
3187 else if (bp->func_stx)
3188 bnx2x_func_stats_init(bp);
3190 bnx2x_hw_stats_post(bp);
3191 bnx2x_storm_stats_post(bp);
/* Stats-state-machine action for the "became PMF" event: drain any pending
 * DMAE completion, pull the old PMF's accumulated port stats from GRC, then
 * start a normal stats cycle.
 */
3194 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3196 bnx2x_stats_comp(bp);
3197 bnx2x_stats_pmf_update(bp);
3198 bnx2x_stats_start(bp);
/* Stats-state-machine restart action: wait for any in-flight DMAE
 * completion, then kick off a fresh statistics cycle.
 */
3201 static void bnx2x_stats_restart(struct bnx2x *bp)
3203 bnx2x_stats_comp(bp);
3204 bnx2x_stats_start(bp);
/* Fold the freshly DMAE'd BMAC (10G MAC) hardware counters into the
 * accumulated port statistics (mac_stx[0] = last snapshot, mac_stx[1] =
 * running totals via UPDATE_STAT64), and derive the pause-frame counters
 * exposed in eth_stats.
 */
3207 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3209 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3210 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3211 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3217 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3218 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3219 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3220 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3221 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3222 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3223 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
/* grxpf feeds two derived counters; gtxpf likewise below */
3224 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3225 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3226 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3227 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3228 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3229 UPDATE_STAT64(tx_stat_gt127,
3230 tx_stat_etherstatspkts65octetsto127octets);
3231 UPDATE_STAT64(tx_stat_gt255,
3232 tx_stat_etherstatspkts128octetsto255octets);
3233 UPDATE_STAT64(tx_stat_gt511,
3234 tx_stat_etherstatspkts256octetsto511octets);
3235 UPDATE_STAT64(tx_stat_gt1023,
3236 tx_stat_etherstatspkts512octetsto1023octets);
3237 UPDATE_STAT64(tx_stat_gt1518,
3238 tx_stat_etherstatspkts1024octetsto1522octets);
3239 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3240 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3241 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3242 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3243 UPDATE_STAT64(tx_stat_gterr,
3244 tx_stat_dot3statsinternalmactransmiterrors);
3245 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
/* Export pause totals from the accumulated (mac_stx[1]) counters */
3247 estats->pause_frames_received_hi =
3248 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3249 estats->pause_frames_received_lo =
3250 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3252 estats->pause_frames_sent_hi =
3253 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3254 estats->pause_frames_sent_lo =
3255 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
/* Fold the freshly DMAE'd EMAC (1G MAC) hardware counters into the
 * accumulated port statistics.  EMAC counters are 32-bit, so
 * UPDATE_EXTEND_STAT extends them into the 64-bit mac_stx[1] accumulators.
 * Pause-frame totals are the sum of XON and XOFF counters.
 */
3258 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3260 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3261 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3262 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3264 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3265 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3266 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3267 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3268 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3269 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3270 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3271 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3272 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3273 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3274 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3275 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3276 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3277 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3278 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3279 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3280 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3281 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3282 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3283 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3284 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3285 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3286 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3287 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3288 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3289 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3290 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3291 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3292 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3293 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3294 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
/* pause received = XON received + XOFF received */
3296 estats->pause_frames_received_hi =
3297 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3298 estats->pause_frames_received_lo =
3299 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3300 ADD_64(estats->pause_frames_received_hi,
3301 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3302 estats->pause_frames_received_lo,
3303 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
/* pause sent = XON sent + XOFF sent */
3305 estats->pause_frames_sent_hi =
3306 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3307 estats->pause_frames_sent_lo =
3308 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3309 ADD_64(estats->pause_frames_sent_hi,
3310 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3311 estats->pause_frames_sent_lo,
3312 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
/* Process a completed HW-stats DMAE cycle: dispatch to the BMAC or EMAC
 * update routine depending on the active MAC, accumulate the NIG BRB
 * discard/truncate and egress packet counters, snapshot the NIG stats for
 * the next delta, mirror mac_stx[1] into eth_stats, and bump the
 * host_port_stats start/end sequence numbers.  Also reports a changed NIG
 * timer max value read from shared memory (when MCP is present).
 * Return statements are among the lines missing from this extracted chunk.
 */
3315 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3317 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3318 struct nig_stats *old = &(bp->port.old_nig_stats);
3319 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3320 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3326 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3327 bnx2x_bmac_stats_update(bp);
3329 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3330 bnx2x_emac_stats_update(bp);
3332 else { /* unreached */
3333 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
/* NIG counters are 32-bit: accumulate deltas vs. the saved snapshot */
3337 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3338 new->brb_discard - old->brb_discard);
3339 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3340 new->brb_truncate - old->brb_truncate);
3342 UPDATE_STAT64_NIG(egress_mac_pkt0,
3343 etherstatspkts1024octetsto1522octets);
3344 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3346 memcpy(old, new, sizeof(struct nig_stats));
/* Copy the accumulated MAC block wholesale into eth_stats */
3348 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3349 sizeof(struct mac_stx));
3350 estats->brb_drop_hi = pstats->brb_drop_hi;
3351 estats->brb_drop_lo = pstats->brb_drop_lo;
/* Matching start/end marks the stats block as consistent for readers */
3353 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3355 if (!BP_NOMCP(bp)) {
3357 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3358 if (nig_timer_max != estats->nig_timer_max) {
3359 estats->nig_timer_max = nig_timer_max;
3360 BNX2X_ERR("NIG timer max (%u)\n",
3361 estats->nig_timer_max);
/* Process the firmware's answer to a STAT_QUERY ramrod: for every RX/TX
 * queue, validate that all three storms (T/U/X) have produced stats for the
 * current stats_counter, then build per-queue byte/packet counters from the
 * storm client stats, aggregate them into the function stats (fstats) and
 * the port-wide error/discard totals (estats).  Finally mirror fstats into
 * eth_stats, fold in MAC-level bad-octet/too-long counters, capture the
 * tstorm per-port discard counters, bump the function-stats sequence
 * numbers, and clear stats_pending.
 * NOTE(review): loop-variable declarations, `continue`/`return` statements
 * and closing braces are among the lines missing from this extracted chunk;
 * code left byte-identical.
 */
3368 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3370 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3371 struct tstorm_per_port_stats *tport =
3372 &stats->tstorm_common.port_statistics;
3373 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3374 struct bnx2x_eth_stats *estats = &bp->eth_stats;
/* Re-seed fstats from the base snapshot, then re-accumulate per queue */
3377 memcpy(&(fstats->total_bytes_received_hi),
3378 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3379 sizeof(struct host_func_stats) - 2*sizeof(u32));
3380 estats->error_bytes_received_hi = 0;
3381 estats->error_bytes_received_lo = 0;
3382 estats->etherstatsoverrsizepkts_hi = 0;
3383 estats->etherstatsoverrsizepkts_lo = 0;
3384 estats->no_buff_discard_hi = 0;
3385 estats->no_buff_discard_lo = 0;
3387 for_each_queue(bp, i) {
3388 struct bnx2x_fastpath *fp = &bp->fp[i];
3389 int cl_id = fp->cl_id;
3390 struct tstorm_per_client_stats *tclient =
3391 &stats->tstorm_common.client_statistics[cl_id];
3392 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3393 struct ustorm_per_client_stats *uclient =
3394 &stats->ustorm_common.client_statistics[cl_id];
3395 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3396 struct xstorm_per_client_stats *xclient =
3397 &stats->xstorm_common.client_statistics[cl_id];
3398 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3399 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3402 /* are storm stats valid? */
3403 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3404 bp->stats_counter) {
3405 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3406 " xstorm counter (0x%x) != stats_counter (0x%x)\n",
3407 i, xclient->stats_counter, bp->stats_counter);
3410 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3411 bp->stats_counter) {
3412 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3413 " tstorm counter (0x%x) != stats_counter (0x%x)\n",
3414 i, tclient->stats_counter, bp->stats_counter);
3417 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3418 bp->stats_counter) {
3419 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3420 " ustorm counter (0x%x) != stats_counter (0x%x)\n",
3421 i, uclient->stats_counter, bp->stats_counter);
/* RX bytes = bcast + mcast + ucast, minus no-buffer-dropped bytes */
3425 qstats->total_bytes_received_hi =
3426 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3427 qstats->total_bytes_received_lo =
3428 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3430 ADD_64(qstats->total_bytes_received_hi,
3431 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
3432 qstats->total_bytes_received_lo,
3433 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
3435 ADD_64(qstats->total_bytes_received_hi,
3436 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
3437 qstats->total_bytes_received_lo,
3438 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
3440 SUB_64(qstats->total_bytes_received_hi,
3441 le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
3442 qstats->total_bytes_received_lo,
3443 le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
3445 SUB_64(qstats->total_bytes_received_hi,
3446 le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
3447 qstats->total_bytes_received_lo,
3448 le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
3450 SUB_64(qstats->total_bytes_received_hi,
3451 le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
3452 qstats->total_bytes_received_lo,
3453 le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
3455 qstats->valid_bytes_received_hi =
3456 qstats->total_bytes_received_hi;
3457 qstats->valid_bytes_received_lo =
3458 qstats->total_bytes_received_lo;
3460 qstats->error_bytes_received_hi =
3461 le32_to_cpu(tclient->rcv_error_bytes.hi);
3462 qstats->error_bytes_received_lo =
3463 le32_to_cpu(tclient->rcv_error_bytes.lo);
/* total received includes error bytes; valid excludes them */
3465 ADD_64(qstats->total_bytes_received_hi,
3466 qstats->error_bytes_received_hi,
3467 qstats->total_bytes_received_lo,
3468 qstats->error_bytes_received_lo);
3470 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3471 total_unicast_packets_received);
3472 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3473 total_multicast_packets_received);
3474 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3475 total_broadcast_packets_received);
3476 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3477 etherstatsoverrsizepkts);
3478 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
/* Packets dropped for lack of buffers don't count as received */
3480 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3481 total_unicast_packets_received);
3482 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3483 total_multicast_packets_received);
3484 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3485 total_broadcast_packets_received);
3486 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3487 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3488 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
/* TX bytes = ucast + mcast + bcast bytes reported by xstorm */
3490 qstats->total_bytes_transmitted_hi =
3491 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3492 qstats->total_bytes_transmitted_lo =
3493 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3495 ADD_64(qstats->total_bytes_transmitted_hi,
3496 le32_to_cpu(xclient->multicast_bytes_sent.hi),
3497 qstats->total_bytes_transmitted_lo,
3498 le32_to_cpu(xclient->multicast_bytes_sent.lo));
3500 ADD_64(qstats->total_bytes_transmitted_hi,
3501 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
3502 qstats->total_bytes_transmitted_lo,
3503 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
3505 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3506 total_unicast_packets_transmitted);
3507 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3508 total_multicast_packets_transmitted);
3509 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3510 total_broadcast_packets_transmitted);
3512 old_tclient->checksum_discard = tclient->checksum_discard;
3513 old_tclient->ttl0_discard = tclient->ttl0_discard;
/* Aggregate this queue's counters into the per-function totals */
3515 ADD_64(fstats->total_bytes_received_hi,
3516 qstats->total_bytes_received_hi,
3517 fstats->total_bytes_received_lo,
3518 qstats->total_bytes_received_lo);
3519 ADD_64(fstats->total_bytes_transmitted_hi,
3520 qstats->total_bytes_transmitted_hi,
3521 fstats->total_bytes_transmitted_lo,
3522 qstats->total_bytes_transmitted_lo);
3523 ADD_64(fstats->total_unicast_packets_received_hi,
3524 qstats->total_unicast_packets_received_hi,
3525 fstats->total_unicast_packets_received_lo,
3526 qstats->total_unicast_packets_received_lo);
3527 ADD_64(fstats->total_multicast_packets_received_hi,
3528 qstats->total_multicast_packets_received_hi,
3529 fstats->total_multicast_packets_received_lo,
3530 qstats->total_multicast_packets_received_lo);
3531 ADD_64(fstats->total_broadcast_packets_received_hi,
3532 qstats->total_broadcast_packets_received_hi,
3533 fstats->total_broadcast_packets_received_lo,
3534 qstats->total_broadcast_packets_received_lo);
3535 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3536 qstats->total_unicast_packets_transmitted_hi,
3537 fstats->total_unicast_packets_transmitted_lo,
3538 qstats->total_unicast_packets_transmitted_lo);
3539 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3540 qstats->total_multicast_packets_transmitted_hi,
3541 fstats->total_multicast_packets_transmitted_lo,
3542 qstats->total_multicast_packets_transmitted_lo);
3543 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3544 qstats->total_broadcast_packets_transmitted_hi,
3545 fstats->total_broadcast_packets_transmitted_lo,
3546 qstats->total_broadcast_packets_transmitted_lo);
3547 ADD_64(fstats->valid_bytes_received_hi,
3548 qstats->valid_bytes_received_hi,
3549 fstats->valid_bytes_received_lo,
3550 qstats->valid_bytes_received_lo);
3552 ADD_64(estats->error_bytes_received_hi,
3553 qstats->error_bytes_received_hi,
3554 estats->error_bytes_received_lo,
3555 qstats->error_bytes_received_lo);
3556 ADD_64(estats->etherstatsoverrsizepkts_hi,
3557 qstats->etherstatsoverrsizepkts_hi,
3558 estats->etherstatsoverrsizepkts_lo,
3559 qstats->etherstatsoverrsizepkts_lo);
3560 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3561 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
/* Fold MAC-level bad octets into the function's received total */
3564 ADD_64(fstats->total_bytes_received_hi,
3565 estats->rx_stat_ifhcinbadoctets_hi,
3566 fstats->total_bytes_received_lo,
3567 estats->rx_stat_ifhcinbadoctets_lo);
3569 memcpy(estats, &(fstats->total_bytes_received_hi),
3570 sizeof(struct host_func_stats) - 2*sizeof(u32));
3572 ADD_64(estats->etherstatsoverrsizepkts_hi,
3573 estats->rx_stat_dot3statsframestoolong_hi,
3574 estats->etherstatsoverrsizepkts_lo,
3575 estats->rx_stat_dot3statsframestoolong_lo);
3576 ADD_64(estats->error_bytes_received_hi,
3577 estats->rx_stat_ifhcinbadoctets_hi,
3578 estats->error_bytes_received_lo,
3579 estats->rx_stat_ifhcinbadoctets_lo);
/* Per-port discard counters maintained by tstorm */
3582 estats->mac_filter_discard =
3583 le32_to_cpu(tport->mac_filter_discard);
3584 estats->xxoverflow_discard =
3585 le32_to_cpu(tport->xxoverflow_discard);
3586 estats->brb_truncate_discard =
3587 le32_to_cpu(tport->brb_truncate_discard);
3588 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3591 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3593 bp->stats_pending = 0;
3598 static void bnx2x_net_stats_update(struct bnx2x *bp)
3600 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3601 struct net_device_stats *nstats = &bp->dev->stats;
3604 nstats->rx_packets =
3605 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3606 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3607 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3609 nstats->tx_packets =
3610 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3611 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3612 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3614 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3616 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3618 nstats->rx_dropped = estats->mac_discard;
3619 for_each_queue(bp, i)
3620 nstats->rx_dropped +=
3621 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3623 nstats->tx_dropped = 0;
3626 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3628 nstats->collisions =
3629 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3631 nstats->rx_length_errors =
3632 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3633 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3634 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3635 bnx2x_hilo(&estats->brb_truncate_hi);
3636 nstats->rx_crc_errors =
3637 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3638 nstats->rx_frame_errors =
3639 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3640 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3641 nstats->rx_missed_errors = estats->xxoverflow_discard;
3643 nstats->rx_errors = nstats->rx_length_errors +
3644 nstats->rx_over_errors +
3645 nstats->rx_crc_errors +
3646 nstats->rx_frame_errors +
3647 nstats->rx_fifo_errors +
3648 nstats->rx_missed_errors;
3650 nstats->tx_aborted_errors =
3651 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3652 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3653 nstats->tx_carrier_errors =
3654 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3655 nstats->tx_fifo_errors = 0;
3656 nstats->tx_heartbeat_errors = 0;
3657 nstats->tx_window_errors = 0;
3659 nstats->tx_errors = nstats->tx_aborted_errors +
3660 nstats->tx_carrier_errors +
3661 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3664 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3666 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3669 estats->driver_xoff = 0;
3670 estats->rx_err_discard_pkt = 0;
3671 estats->rx_skb_alloc_failed = 0;
3672 estats->hw_csum_err = 0;
3673 for_each_queue(bp, i) {
3674 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3676 estats->driver_xoff += qstats->driver_xoff;
3677 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3678 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3679 estats->hw_csum_err += qstats->hw_csum_err;
/* Periodic statistics refresh: once the previous DMAE completion has
 * landed, pull the latest HW and storm-FW statistics into the driver's
 * software mirrors, publish them to the net stack, optionally dump
 * per-queue debug info, and finally re-arm both collection paths.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* previous DMAE transfer not finished yet - try again next tick */
	if (*stats_comp != DMAE_COMP_VAL)

	bnx2x_hw_stats_update(bp);

	/* tolerate a few missed storm updates before declaring failure;
	 * stats_pending counts consecutive misses */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* verbose per-queue dump, only when timer-level debug is enabled */
	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;

		printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
		       estats->brb_drop_lo, estats->brb_truncate_lo);

		/* Rx side: ring usage, SB consumer and packet counters */
		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;

			printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
			       " rx pkt(%lu) rx calls(%lu %lu)\n",
			       fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
			       le16_to_cpu(*fp->rx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_received_hi),
			       fp->rx_calls, fp->rx_pkt);

		/* Tx side: ring space, SB consumer and Xoff state */
		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
			struct netdev_queue *txq =
				netdev_get_tx_queue(bp->dev, i);

			printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
			       " tx pkt(%lu) tx calls (%lu)"
			       " %s (Xoff events %u)\n",
			       fp->name, bnx2x_tx_avail(fp),
			       le16_to_cpu(*fp->tx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_transmitted_hi),
			       (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
			       qstats->driver_xoff);

	/* kick off the next HW and storm collection cycles */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
/* Build DMAE commands that write the final port (and, when valid,
 * function) statistics from host memory back into the chip's shmem
 * areas before statistics collection is stopped, so the management
 * firmware sees up-to-date numbers.
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
	struct dmae_command *dmae;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* common opcode: copy PCI (host) -> GRC (chip), endianity per
	 * build configuration, tagged with this function's port and VN */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
		  DMAE_CMD_ENDIANITY_DW_SWAP |
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* completion to GRC chains into the loader channel;
		 * completion to PCI signals the host directly */
		dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;	/* dword address */
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;

		/* chained case: completion kicks the next loader command */
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;

		/* direct case: completion writes DMAE_COMP_VAL into the
		 * stats_comp word in host memory */
		dmae->comp_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

	/* function statistics block, written back the same way with a
	 * direct PCI completion */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;
/* Final statistics pass on the way down: wait for any in-flight DMAE,
 * take one last HW/storm snapshot, publish it to the net stack, and
 * flush port statistics back to the chip for the management firmware.
 */
static void bnx2x_stats_stop(struct bnx2x *bp)
	bnx2x_stats_comp(bp);

	/* both update helpers return 0 on success; remember whether we
	 * actually obtained fresh numbers */
	update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

		bnx2x_net_stats_update(bp);

			bnx2x_port_stats_stop(bp);

		/* fire the write-back commands and wait for completion */
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
/* No-op action for state-machine entries whose event requires no work
 * in the current state.
 */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
/* Statistics state machine, indexed by [current state][event]: each
 * entry names the action to run and the state to transition to.
 * The PMF event re-evaluates who owns port statistics collection.
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
/* Drive the statistics state machine: run the action registered for
 * the current (state, event) pair, then advance to that entry's next
 * state.  Skipped entirely once the driver has paniced.
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
	enum bnx2x_stats_state state = bp->stats_state;

	if (unlikely(bp->panic))

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */

	/* UPDATE fires from the periodic timer, so only log it when
	 * timer-level debugging is requested to avoid log spam */
	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
/* PMF only: DMAE the (zeroed) host port-statistics buffer out to the
 * chip's port_stx shmem area so the base image starts in a known
 * state.  Runs synchronously - posts the command and waits for the
 * completion value.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* only the PMF with a valid port stats address may run this */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");

	bp->executer_idx = 0;

	/* single PCI -> GRC copy with a direct PCI completion */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
			DMAE_CMD_ENDIANITY_DW_SWAP |
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;	/* dword address */
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* post and wait for the completion marker */
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
/* PMF only: run one statistics-init cycle for every vnic on this port
 * so each function's statistics area starts out in a known state.
 * Temporarily retargets bp->func_stx at each vnic's stats mailbox
 * address (read from shmem) and restores it when done.
 */
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);

	/* only the PMF with a valid function stats address may run this */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {

		/* point at this vnic's stats area and initialize it */
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);

	/* restore our func_stx */
	bp->func_stx = func_stx;
/* Non-PMF path: DMAE the function statistics base image from the
 * chip's func_stx shmem area into host memory (func_stats_base) so
 * later updates accumulate on top of the values left by a previous
 * driver instance.  Runs synchronously.
 */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity: a valid function stats address is required */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* single GRC -> PCI copy with a direct PCI completion */
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
			DMAE_CMD_ENDIANITY_DW_SWAP |
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;	/* dword address */
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* post and wait for the completion marker */
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
/* One-time statistics setup at load: reset all bookkeeping, learn the
 * shmem addresses of the port/function statistics areas from the MCP,
 * snapshot the NIG counters as the "old" baseline, zero every per-queue
 * and global software counter, and initialize (PMF) or re-read (non-PMF)
 * the statistics base images.
 */
static void bnx2x_stats_init(struct bnx2x *bp)
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

		/* no MCP: no shmem statistics areas to talk to */
		bp->port.port_stx = 0;

	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* snapshot the NIG drop/truncate and egress counters so later
	 * updates can compute deltas against this baseline */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* clear the per-queue "previous" storm snapshots and the
		 * software queue counters */
		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	/* PMF initializes the base images; everyone else re-reads the
	 * existing function base left by a previous driver */
	if (bp->port.port_stx)
		bnx2x_port_stats_base_init(bp);

		bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
/* Periodic driver timer: services the Rx ring in poll mode, maintains
 * the driver<->MCP heartbeat (pulse sequence numbers), triggers a
 * statistics update, and re-arms itself.
 */
static void bnx2x_timer(unsigned long data)
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))

	/* interrupts are (still) disabled - do nothing this tick */
	if (atomic_read(&bp->intr_sem) != 0)

		struct bnx2x_fastpath *fp = &bp->fp[0];

		rc = bnx2x_rx_int(fp, 1000);

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		/* advance our pulse counter and publish it to the MCP */
		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

	/* re-arm for the next interval */
	mod_timer(&bp->timer, jiffies + bp->current_interval);
4090 /* end of Statistics */
4095 * nic init service functions
4098 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4100 int port = BP_PORT(bp);
4103 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4104 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4105 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4106 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4107 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4108 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
/* Program a non-default status block: tell the USTORM and CSTORM
 * sections their id, publish the block's DMA address to the chip,
 * disable host coalescing on every index to start with, and ack the
 * block to enable IGU interrupts for it.
 */
void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);

	/* USTORM section */
	section = ((u64)mapping) + offsetof(struct host_status_block,

	sb->u_status_block.status_block_id = sb_id;

	/* DMA address of the USTORM section (low then high dword) */
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	/* start with host coalescing disabled on every USTORM index */
	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM section */
	section = ((u64)mapping) + offsetof(struct host_status_block,

	sb->c_status_block.status_block_id = sb_id;

	/* DMA address of the CSTORM section (low then high dword) */
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	/* start with host coalescing disabled on every CSTORM index */
	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	/* ack to enable interrupt generation for this status block */
	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4156 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4158 int func = BP_FUNC(bp);
4160 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4161 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4162 sizeof(struct tstorm_def_status_block)/4);
4163 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4164 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4165 sizeof(struct cstorm_def_status_block_u)/4);
4166 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4167 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4168 sizeof(struct cstorm_def_status_block_c)/4);
4169 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4170 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4171 sizeof(struct xstorm_def_status_block)/4);
/* Program the default status block: wire up the attention section
 * (AEU groups and HC attention address/number), then publish the DMA
 * address of each storm section (U/C/T/X) and disable host coalescing
 * on every index.  Finishes by acking the block to enable interrupts.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp,
				     struct host_def_status_block *def_sb,
				     dma_addr_t mapping, int sb_id)
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;

	/* ATTN section */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	/* cache the four enable registers of every dynamic AEU group */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);

	/* point the HC at the attention section's DMA address */
	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	REG_WR(bp, reg_offset, val);

	/* USTORM section */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	/* host coalescing off on all USTORM default-SB indices */
	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM section */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	/* host coalescing off on all CSTORM default-SB indices */
	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM section */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	/* host coalescing off on all TSTORM default-SB indices */
	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM section */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	/* host coalescing off on all XSTORM default-SB indices */
	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	/* enable interrupt generation for the default status block */
	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4290 void bnx2x_update_coalesce(struct bnx2x *bp)
4292 int port = BP_PORT(bp);
4295 for_each_queue(bp, i) {
4296 int sb_id = bp->fp[i].sb_id;
4298 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4299 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4300 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4301 U_SB_ETH_RX_CQ_INDEX),
4302 bp->rx_ticks/(4 * BNX2X_BTR));
4303 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4304 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4305 U_SB_ETH_RX_CQ_INDEX),
4306 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4308 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4309 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4310 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4311 C_SB_ETH_TX_CQ_INDEX),
4312 bp->tx_ticks/(4 * BNX2X_BTR));
4313 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4314 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4315 C_SB_ETH_TX_CQ_INDEX),
4316 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4320 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4322 int func = BP_FUNC(bp);
4324 spin_lock_init(&bp->spq_lock);
4326 bp->spq_left = MAX_SPQ_PENDING;
4327 bp->spq_prod_idx = 0;
4328 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4329 bp->spq_prod_bd = bp->spq;
4330 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4332 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4333 U64_LO(bp->spq_mapping));
4335 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4336 U64_HI(bp->spq_mapping));
4338 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
/* Fill in the per-connection ETH context for every queue: the USTORM
 * (Rx) part - status block binding, buffer sizes, BD/SGE page bases
 * and optional TPA settings - plus the CDU reserved values, then the
 * CSTORM/XSTORM (Tx) part with the Tx BD page base.
 */
static void bnx2x_init_context(struct bnx2x *bp)

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		/* Rx (USTORM) side: bind the queue to its status block
		 * and client id */
		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
		/* DMA address of the Rx BD ring */
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			/* TPA enabled: also program the SGE ring and the
			 * maximum SGE count per aggregated packet */
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			/* max SGEs for one MTU-sized frame, rounded up to
			 * a whole number of SGE pages */
			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;

		/* CDU validation values for the aggregation contexts */
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);

	/* Tx (CSTORM/XSTORM) side */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		/* DMA address of the Tx BD ring */
		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4418 static void bnx2x_init_ind_table(struct bnx2x *bp)
4420 int func = BP_FUNC(bp);
4423 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4427 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4428 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4429 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4430 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4431 bp->fp->cl_id + (i % bp->num_queues));
/* Write the TSTORM per-client configuration (MTU, statistics and
 * VLAN-stripping flags) for every queue's client id.  The structure
 * is pushed to the chip as two raw dwords.
 */
void bnx2x_set_client_config(struct bnx2x *bp)
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);

	/* strip VLAN tags in HW only when Rx is on and a vlan group with
	 * HW acceleration is registered */
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		/* push the config as two raw dwords per client */
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
/* Translate the driver's rx_mode into the TSTORM MAC filter
 * configuration (per-client drop/accept masks) and the NIG LLH
 * BRB1-driver mask, then write both to the chip.  Re-applies the
 * per-client config whenever Rx is not fully disabled.
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);

	/* All but management unicast packets should pass to the host as well */
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	case BNX2X_RX_MODE_NONE: /* no Rx */
		/* drop everything for this client mask */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;

	case BNX2X_RX_MODE_NORMAL:
		/* unicast/multicast filtered by MAC/MC tables; accept
		 * all broadcast */
		tstorm_mac_filter.bcast_accept_all = mask;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;

		BNX2X_ERR("BAD rx mode (%d)\n", mode);

	/* program the NIG LLH mask for this port */
	    (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),

	/* push the filter config to the chip, one dword at a time */
	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
4530 static void bnx2x_init_internal_common(struct bnx2x *bp)
4534 /* Zero this manually as its initialization is
4535 currently missing in the initTool */
4536 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4537 REG_WR(bp, BAR_USTRORM_INTMEM +
4538 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4541 static void bnx2x_init_internal_port(struct bnx2x *bp)
4543 int port = BP_PORT(bp);
4546 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
4548 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
4549 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4550 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4553 static void bnx2x_init_internal_func(struct bnx2x *bp)
4555 struct tstorm_eth_function_common_config tstorm_config = {0};
4556 struct stats_indication_flags stats_flags = {0};
4557 int port = BP_PORT(bp);
4558 int func = BP_FUNC(bp);
4563 tstorm_config.config_flags = RSS_FLAGS(bp);
4566 tstorm_config.rss_result_mask = MULTI_MASK;
4568 /* Enable TPA if needed */
4569 if (bp->flags & TPA_ENABLE_FLAG)
4570 tstorm_config.config_flags |=
4571 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
4574 tstorm_config.config_flags |=
4575 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4577 tstorm_config.leading_client_id = BP_L_ID(bp);
4579 REG_WR(bp, BAR_TSTRORM_INTMEM +
4580 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4581 (*(u32 *)&tstorm_config));
4583 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4584 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
4585 bnx2x_set_storm_rx_mode(bp);
4587 for_each_queue(bp, i) {
4588 u8 cl_id = bp->fp[i].cl_id;
4590 /* reset xstorm per client statistics */
4591 offset = BAR_XSTRORM_INTMEM +
4592 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4594 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4595 REG_WR(bp, offset + j*4, 0);
4597 /* reset tstorm per client statistics */
4598 offset = BAR_TSTRORM_INTMEM +
4599 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4601 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4602 REG_WR(bp, offset + j*4, 0);
4604 /* reset ustorm per client statistics */
4605 offset = BAR_USTRORM_INTMEM +
4606 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4608 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4609 REG_WR(bp, offset + j*4, 0);
4612 /* Init statistics related context */
4613 stats_flags.collect_eth = 1;
4615 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4616 ((u32 *)&stats_flags)[0]);
4617 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4618 ((u32 *)&stats_flags)[1]);
4620 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4621 ((u32 *)&stats_flags)[0]);
4622 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4623 ((u32 *)&stats_flags)[1]);
4625 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4626 ((u32 *)&stats_flags)[0]);
4627 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4628 ((u32 *)&stats_flags)[1]);
4630 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4631 ((u32 *)&stats_flags)[0]);
4632 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4633 ((u32 *)&stats_flags)[1]);
4635 REG_WR(bp, BAR_XSTRORM_INTMEM +
4636 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4637 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4638 REG_WR(bp, BAR_XSTRORM_INTMEM +
4639 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4640 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4642 REG_WR(bp, BAR_TSTRORM_INTMEM +
4643 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4644 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4645 REG_WR(bp, BAR_TSTRORM_INTMEM +
4646 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4647 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4649 REG_WR(bp, BAR_USTRORM_INTMEM +
4650 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4651 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4652 REG_WR(bp, BAR_USTRORM_INTMEM +
4653 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4654 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4656 if (CHIP_IS_E1H(bp)) {
4657 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4659 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4661 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4663 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4666 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4670 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4671 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
4672 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
4673 for_each_queue(bp, i) {
4674 struct bnx2x_fastpath *fp = &bp->fp[i];
4676 REG_WR(bp, BAR_USTRORM_INTMEM +
4677 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
4678 U64_LO(fp->rx_comp_mapping));
4679 REG_WR(bp, BAR_USTRORM_INTMEM +
4680 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
4681 U64_HI(fp->rx_comp_mapping));
4684 REG_WR(bp, BAR_USTRORM_INTMEM +
4685 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
4686 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
4687 REG_WR(bp, BAR_USTRORM_INTMEM +
4688 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
4689 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
4691 REG_WR16(bp, BAR_USTRORM_INTMEM +
4692 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
4696 /* dropless flow control */
4697 if (CHIP_IS_E1H(bp)) {
4698 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
4700 rx_pause.bd_thr_low = 250;
4701 rx_pause.cqe_thr_low = 250;
4703 rx_pause.sge_thr_low = 0;
4704 rx_pause.bd_thr_high = 350;
4705 rx_pause.cqe_thr_high = 350;
4706 rx_pause.sge_thr_high = 0;
4708 for_each_queue(bp, i) {
4709 struct bnx2x_fastpath *fp = &bp->fp[i];
4711 if (!fp->disable_tpa) {
4712 rx_pause.sge_thr_low = 150;
4713 rx_pause.sge_thr_high = 250;
4717 offset = BAR_USTRORM_INTMEM +
4718 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
4721 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
4723 REG_WR(bp, offset + j*4,
4724 ((u32 *)&rx_pause)[j]);
4728 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
4730 /* Init rate shaping and fairness contexts */
4734 /* During init there is no active link
4735 Until link is up, set link rate to 10Gbps */
4736 bp->link_vars.line_speed = SPEED_10000;
4737 bnx2x_init_port_minmax(bp);
4741 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4742 bnx2x_calc_vn_weight_sum(bp);
4744 for (vn = VN_0; vn < E1HVN_MAX; vn++)
4745 bnx2x_init_vn_minmax(bp, 2*vn + port);
4747 /* Enable rate shaping and fairness */
4748 bp->cmng.flags.cmng_enables |=
4749 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
4752 /* rate shaping and fairness are disabled */
4754 "single function mode minmax will be disabled\n");
4758 /* Store cmng structures to internal memory */
4760 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
4761 REG_WR(bp, BAR_XSTRORM_INTMEM +
4762 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
4763 ((u32 *)(&bp->cmng))[i]);
/*
 * Dispatch second-stage (internal-memory) initialisation according to the
 * load level granted by the MCP management firmware.
 *
 * NOTE(review): gaps in the embedded line numbering show this excerpt has
 * elided lines (braces / break statements).  Presumably COMMON falls
 * through to PORT, which falls through to FUNCTION, so a higher load
 * level performs all lower-level init too -- confirm against full source.
 */
4766 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4768 switch (load_code) {
4769 case FW_MSG_CODE_DRV_LOAD_COMMON:
4770 bnx2x_init_internal_common(bp);
4773 case FW_MSG_CODE_DRV_LOAD_PORT:
4774 bnx2x_init_internal_port(bp);
4777 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4778 bnx2x_init_internal_func(bp);
/* unrecognised load level from the MCP: log it, nothing else to do */
4782 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
/*
 * Top-level NIC (software-side) init: set up every fastpath queue's
 * status block and client id, the default status block, the rx/tx/slowpath
 * rings, contexts and statistics, then enable interrupts.
 *
 * NOTE(review): lines are elided from this excerpt (embedded numbering has
 * gaps); conditionals around sb_id assignment and barriers are missing.
 */
4787 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4791 for_each_queue(bp, i) {
4792 struct bnx2x_fastpath *fp = &bp->fp[i];
4795 fp->state = BNX2X_FP_STATE_CLOSED;
/* client id is the function's base L-id plus the queue index */
4797 fp->cl_id = BP_L_ID(bp) + i;
/* one of these two assignments is taken per an elided condition --
 * presumably CNIC reserves an sb, shifting sb_id by one; verify */
4799 fp->sb_id = fp->cl_id + 1;
4801 fp->sb_id = fp->cl_id;
4804 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
4805 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
4806 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4808 bnx2x_update_fpsb_idx(fp);
4811 /* ensure status block indices were read */
4815 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4817 bnx2x_update_dsb_idx(bp);
4818 bnx2x_update_coalesce(bp);
4819 bnx2x_init_rx_rings(bp);
4820 bnx2x_init_tx_ring(bp);
4821 bnx2x_init_sp_ring(bp);
4822 bnx2x_init_context(bp);
4823 bnx2x_init_internal(bp, load_code);
4824 bnx2x_init_ind_table(bp);
4825 bnx2x_stats_init(bp);
4827 /* At this point, we are ready for interrupts */
4828 atomic_set(&bp->intr_sem, 0);
4830 /* flush all before enabling interrupts */
4834 bnx2x_int_enable(bp);
4836 /* Check for SPIO5 */
/* process a possibly already-pending fan-failure attention (SPIO5) */
4837 bnx2x_attn_int_deasserted0(bp,
4838 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4839 AEU_INPUTS_ATTN_BITS_SPIO5);
4842 /* end of nic init */
4845 * gzip service functions
/*
 * Allocate the resources used to decompress firmware images:
 * a DMA-coherent output buffer (FW_BUF_SIZE), a zlib stream object and
 * its inflate workspace.  Returns 0 on success (return statements and
 * goto labels are elided from this excerpt).
 */
4848 static int bnx2x_gunzip_init(struct bnx2x *bp)
4850 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4851 &bp->gunzip_mapping, GFP_KERNEL);
4852 if (bp->gunzip_buf == NULL)
4855 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4856 if (bp->strm == NULL)
4859 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4861 if (bp->strm->workspace == NULL)
/* error unwind: free what was allocated, in reverse order
 * (intervening kfree/label lines are elided here) */
4871 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4872 bp->gunzip_mapping);
4873 bp->gunzip_buf = NULL;
4876 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4877 " un-compression\n");
/*
 * Release the decompression resources set up by bnx2x_gunzip_init():
 * zlib workspace, stream object (kfree of bp->strm elided in this
 * excerpt) and the DMA output buffer.
 */
4881 static void bnx2x_gunzip_end(struct bnx2x *bp)
4883 kfree(bp->strm->workspace);
4888 if (bp->gunzip_buf) {
4889 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4890 bp->gunzip_mapping);
/* clear the pointer so a double call is harmless */
4891 bp->gunzip_buf = NULL;
/*
 * Inflate a gzip-wrapped firmware blob @zbuf (@len bytes) into
 * bp->gunzip_buf using raw deflate (-MAX_WBITS, i.e. the gzip header is
 * skipped manually).  On success bp->gunzip_outlen holds the output
 * length in 32-bit words.  Local declarations and some returns are
 * elided from this excerpt.
 */
4895 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4899 /* check gzip header */
4900 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4901 BNX2X_ERR("Bad gzip header\n");
/* skip the optional NUL-terminated original-file-name field */
4909 if (zbuf[3] & FNAME)
4910 while ((zbuf[n++] != 0) && (n < len));
4912 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4913 bp->strm->avail_in = len - n;
4914 bp->strm->next_out = bp->gunzip_buf;
4915 bp->strm->avail_out = FW_BUF_SIZE;
/* negative window bits = raw deflate stream, no zlib header */
4917 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4921 rc = zlib_inflate(bp->strm, Z_FINISH);
4922 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4923 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4926 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
/* firmware images are consumed as 32-bit words; warn if not aligned */
4927 if (bp->gunzip_outlen & 0x3)
4928 netdev_err(bp->dev, "Firmware decompression error:"
4929 " gunzip_outlen (%d) not aligned\n",
4931 bp->gunzip_outlen >>= 2;
4933 zlib_inflateEnd(bp->strm);
4935 if (rc == Z_STREAM_END)
4941 /* nic load/unload */
4944 * General service functions
4947 /* send a NIG loopback debug packet */
/*
 * Inject one minimal debug packet into the NIG loopback path
 * (used by the internal-memory self test).  Two 3-word writes:
 * first carries SOP, second carries EOP.
 * (The wb_write[] local declaration is elided from this excerpt.)
 */
4948 static void bnx2x_lb_pckt(struct bnx2x *bp)
4952 /* Ethernet source and destination addresses */
4953 wb_write[0] = 0x55555555;
4954 wb_write[1] = 0x55555555;
4955 wb_write[2] = 0x20; /* SOP */
4956 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4958 /* NON-IP protocol */
4959 wb_write[0] = 0x09000000;
4960 wb_write[1] = 0x55555555;
4961 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4962 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4965 /* some of the internal memories
4966 * are not directly readable from the driver
4967 * to test them we send debug packets
/*
 * Self-test of internal memories that are not directly readable: send
 * loopback debug packets through BRB/PRS/NIG and poll the statistics
 * registers for the expected packet counts.  Returns 0 on success,
 * non-zero on any timeout/mismatch (return paths and loop bodies are
 * partially elided from this excerpt).  `factor` scales all timeouts
 * for FPGA/emulation platforms.
 */
4969 static int bnx2x_int_mem_test(struct bnx2x *bp)
4975 if (CHIP_REV_IS_FPGA(bp))
4977 else if (CHIP_REV_IS_EMUL(bp))
4982 DP(NETIF_MSG_HW, "start part1\n");
4984 /* Disable inputs of parser neighbor blocks */
4985 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4986 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4987 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4988 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4990 /* Write 0 to parser credits for CFC search request */
4991 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4993 /* send Ethernet packet */
4996 /* TODO do i reset NIG statistic? */
4997 /* Wait until NIG register shows 1 packet of size 0x10 */
4998 count = 1000 * factor;
5001 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5002 val = *bnx2x_sp(bp, wb_data[0]);
5010 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5014 /* Wait until PRS register shows 1 packet */
5015 count = 1000 * factor;
5017 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5025 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5029 /* Reset and init BRB, PRS */
5030 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5032 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5034 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5035 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5037 DP(NETIF_MSG_HW, "part2\n");
5039 /* Disable inputs of parser neighbor blocks */
5040 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5041 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5042 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5043 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5045 /* Write 0 to parser credits for CFC search request */
5046 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5048 /* send 10 Ethernet packets */
5049 for (i = 0; i < 10; i++)
5052 /* Wait until NIG register shows 10 + 1
5053 packets of size 11*0x10 = 0xb0 */
5054 count = 1000 * factor;
5057 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5058 val = *bnx2x_sp(bp, wb_data[0]);
5066 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5070 /* Wait until PRS register shows 2 packets */
5071 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5073 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5075 /* Write 1 to parser credits for CFC search request */
5076 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5078 /* Wait until PRS register shows 3 packets */
5079 msleep(10 * factor);
5080 /* Wait until NIG register shows 1 packet of size 0x10 */
5081 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5083 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5085 /* clear NIG EOP FIFO */
/* 11 packets were looped back, so pop 11 FIFO entries */
5086 for (i = 0; i < 11; i++)
5087 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5088 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5090 BNX2X_ERR("clear of NIG failed\n");
5094 /* Reset and init BRB, PRS, NIG */
5095 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5097 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5099 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5100 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5103 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5106 /* Enable inputs of parser neighbor blocks */
5107 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5108 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5109 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5110 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5112 DP(NETIF_MSG_HW, "done\n");
/*
 * Unmask the attention interrupts of all HW blocks (writing 0 to an
 * INT_MASK register unmasks every bit).  Commented-out SEM/MISC lines
 * are deliberately left masked.  PBF keeps bits 3-4 masked (0x18).
 */
5117 static void enable_blocks_attention(struct bnx2x *bp)
5119 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5120 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5121 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5122 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5123 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5124 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5125 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5126 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5127 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5128 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5129 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5130 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5131 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5132 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5133 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5134 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5135 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5136 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5137 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5138 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5139 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5140 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
/* PXP2 mask value differs on FPGA vs real silicon */
5141 if (CHIP_REV_IS_FPGA(bp))
5142 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5144 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5145 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5146 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5147 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5148 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5149 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5150 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5151 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5152 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5153 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
/*
 * Per-block parity mask table consumed by enable_blocks_parity().
 * Each entry is {PRTY_MASK register address, mask value}: 0x0 enables
 * all parity attentions for the block, 0xffffffff leaves them all
 * masked; other values mask the specific bits noted.
 * (The addr/mask field declarations are elided from this excerpt.)
 */
5156 static const struct {
5159 } bnx2x_parity_mask[] = {
5160 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
5161 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
5162 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
5163 {HC_REG_HC_PRTY_MASK, 0xffffffff},
5164 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
5165 {QM_REG_QM_PRTY_MASK, 0x0},
5166 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
5167 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
5168 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
5169 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
5170 {CDU_REG_CDU_PRTY_MASK, 0x0},
5171 {CFC_REG_CFC_PRTY_MASK, 0x0},
5172 {DBG_REG_DBG_PRTY_MASK, 0x0},
5173 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
5174 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
5175 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
5176 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
5177 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
5178 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
5179 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
5180 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
5181 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
5182 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
5183 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
5184 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
5185 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
5186 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
5187 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
/*
 * Program every PRTY_MASK register listed in bnx2x_parity_mask[],
 * enabling parity-error attentions for the blocks whose mask is 0.
 */
5190 static void enable_blocks_parity(struct bnx2x *bp)
5192 int i, mask_arr_len =
5193 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
5195 for (i = 0; i < mask_arr_len; i++)
5196 REG_WR(bp, bnx2x_parity_mask[i].addr,
5197 bnx2x_parity_mask[i].mask);
/*
 * Put the chip's common (non-per-port) blocks into reset by writing the
 * RESET_REG_1/2 CLEAR registers.  (The 0xd3ffff7f mask argument of the
 * first write is elided from this excerpt.)
 */
5201 static void bnx2x_reset_common(struct bnx2x *bp)
5204 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5206 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
/*
 * Configure the PXP arbiter from the PCIe Device Control register:
 * write order from Max_Payload_Size (bits 7:5), read order from
 * Max_Read_Request_Size (bits 14:12), optionally forced by bp->mrrs.
 */
5209 static void bnx2x_init_pxp(struct bnx2x *bp)
5212 int r_order, w_order;
5214 pci_read_config_word(bp->pdev,
5215 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5216 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5217 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5219 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
/* a module-parameter override of the read order applies here
 * (the assignment line is elided from this excerpt) */
5221 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5225 bnx2x_init_pxp_arb(bp, r_order, w_order);
/*
 * Decide (from shared-memory HW config and PHY types) whether fan
 * failure detection is required on this board, and if so configure
 * SPIO5 as an active-low input interrupt routed to the IGU.
 */
5228 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5238 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5239 SHARED_HW_CFG_FAN_FAILURE_MASK;
5241 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5245 * The fan failure mechanism is usually related to the PHY type since
5246 * the power consumption of the board is affected by the PHY. Currently,
5247 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5249 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5250 for (port = PORT_0; port < PORT_MAX; port++) {
5252 SHMEM_RD(bp, dev_info.port_hw_config[port].
5253 external_phy_config) &
5254 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
/* is_required accumulates across both ports; the comparisons
 * below test the PHY type read above (lvalue lines elided) */
5257 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5259 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5261 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5264 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5266 if (is_required == 0)
5269 /* Fan failure is indicated by SPIO 5 */
5270 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5271 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5273 /* set to active low mode */
5274 val = REG_RD(bp, MISC_REG_SPIO_INT);
5275 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5276 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5277 REG_WR(bp, MISC_REG_SPIO_INT, val);
5279 /* enable interrupt to signal the IGU */
5280 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5281 val |= (1 << MISC_REGISTERS_SPIO_5);
5282 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
/*
 * One-time, chip-wide hardware init performed only by the function that
 * the MCP grants FW_MSG_CODE_DRV_LOAD_COMMON.  Releases blocks from
 * reset and runs each block's COMMON_STAGE init in dependency order:
 * MISC -> PXP/PXP2 -> DMAE -> CMs -> QM -> DQ -> BRB/PRS -> SDMs ->
 * SEMs -> PBF -> searcher -> CDU/CFC -> HC/AEU -> NIG, then self-test
 * and PHY common init.  Returns 0 on success (several error-return
 * lines and delays are elided from this excerpt).
 */
5285 static int bnx2x_init_common(struct bnx2x *bp)
5292 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5294 bnx2x_reset_common(bp);
/* take everything out of reset */
5295 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5296 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5298 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5299 if (CHIP_IS_E1H(bp))
5300 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5302 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5304 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5306 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5307 if (CHIP_IS_E1(bp)) {
5308 /* enable HW interrupt from PXP on USDM overflow
5309 bit 16 on INT_MASK_0 */
5310 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5313 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
/* big-endian DMA mastering setup (guarded by __BIG_ENDIAN,
 * the #ifdef lines are elided from this excerpt) */
5317 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5318 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5319 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5320 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5321 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5322 /* make sure this value is 0 */
5323 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5325 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5326 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5327 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5328 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5329 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5332 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5334 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5335 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5336 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5339 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5340 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5342 /* let the HW do it's magic ... */
5344 /* finish PXP init */
5345 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5347 BNX2X_ERR("PXP2 CFG failed\n");
5350 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5352 BNX2X_ERR("PXP2 RD_INIT failed\n");
5356 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5357 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5359 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5361 /* clean the DMAE memory */
5363 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5365 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5366 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5367 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5368 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5370 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5371 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5372 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5373 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5375 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
/* program the QM queue base-address / pointer tables */
5380 for (i = 0; i < 64; i++) {
5381 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
5382 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
5384 if (CHIP_IS_E1H(bp)) {
5385 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
5386 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
5391 /* soft reset pulse */
5392 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5393 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5396 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5399 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5400 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5401 if (!CHIP_REV_IS_SLOW(bp)) {
5402 /* enable hw interrupt from doorbell Q */
5403 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5406 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5407 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5408 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5411 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5413 if (CHIP_IS_E1H(bp))
5414 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5416 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5417 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5418 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5419 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
/* zero each storm's internal fast memory */
5421 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5422 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5423 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5424 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5426 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5427 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5428 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5429 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
/* pulse the semi-processors through reset
 * (mask arguments are elided from this excerpt) */
5432 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5434 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5437 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5438 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5439 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5441 REG_WR(bp, SRC_REG_SOFT_RST, 1);
/* seed the searcher RSS key registers with random values */
5442 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5443 REG_WR(bp, i, random32());
5444 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5446 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5447 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5448 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5449 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5450 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5451 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5452 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5453 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5454 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5455 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5457 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5459 if (sizeof(union cdu_context) != 1024)
5460 /* we currently assume that a context is 1024 bytes */
5461 dev_alert(&bp->pdev->dev, "please adjust the size "
5462 "of cdu_context(%ld)\n",
5463 (long)sizeof(union cdu_context));
5465 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5466 val = (4 << 24) + (0 << 12) + 1024;
5467 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5469 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5470 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5471 /* enable context validation interrupt from CFC */
5472 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5474 /* set the thresholds to prevent CFC/CDU race */
5475 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5477 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5478 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5480 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5481 /* Reset PCIE errors for debug */
5482 REG_WR(bp, 0x2814, 0xffffffff);
5483 REG_WR(bp, 0x3820, 0xffffffff);
5485 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5486 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5487 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5488 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5490 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5491 if (CHIP_IS_E1H(bp)) {
5492 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5493 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5496 if (CHIP_REV_IS_SLOW(bp))
5499 /* finish CFC init */
5500 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5502 BNX2X_ERR("CFC LL_INIT failed\n");
5505 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5507 BNX2X_ERR("CFC AC_INIT failed\n");
5510 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5512 BNX2X_ERR("CFC CAM_INIT failed\n");
5515 REG_WR(bp, CFC_REG_DEBUG0, 0);
5517 /* read NIG statistic
5518 to see if this is our first up since powerup */
5519 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5520 val = *bnx2x_sp(bp, wb_data[0]);
5522 /* do internal memory self test */
5523 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5524 BNX2X_ERR("internal mem self test failed\n");
/* these external PHYs need the HW lock for MDIO access */
5528 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5529 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5530 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5531 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5532 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
5533 bp->port.need_hw_lock = 1;
5540 bnx2x_setup_fan_failure_detection(bp);
5542 /* clear PXP2 attentions */
5543 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5545 enable_blocks_attention(bp);
5546 if (CHIP_PARITY_SUPPORTED(bp))
5547 enable_blocks_parity(bp);
5549 if (!BP_NOMCP(bp)) {
5550 bnx2x_acquire_phy_lock(bp);
5551 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5552 bnx2x_release_phy_lock(bp);
5554 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
/*
 * Per-port hardware init, run once for each port granted
 * FW_MSG_CODE_DRV_LOAD_PORT: per-port stage of every block, BRB pause
 * thresholds scaled to MTU, PBF credits, AEU attention masks, NIG mode
 * selection, and external-PHY attention-group wiring.  Returns 0
 * (return/brace lines are elided from this excerpt).
 */
5559 static int bnx2x_init_port(struct bnx2x *bp)
5561 int port = BP_PORT(bp);
5562 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5566 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
/* mask this port's NIG interrupt while initialising */
5568 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5570 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5571 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5573 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5574 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5575 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
5576 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5579 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
5581 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5582 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5583 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
5586 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5588 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5589 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5590 /* no pause for emulation and FPGA */
/* pick BRB low/high pause thresholds from MTU and one/two-port
 * mode (some branch lines are elided from this excerpt) */
5595 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5596 else if (bp->dev->mtu > 4096) {
5597 if (bp->flags & ONE_PORT_FLAG)
5601 /* (24*1024 + val*4)/256 */
5602 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5605 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5606 high = low + 56; /* 14*1024/256 */
5608 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5609 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5612 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5614 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5615 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5616 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5617 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5619 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5620 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5621 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5622 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5624 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5625 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5627 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5629 /* configure PBF to work without PAUSE mtu 9000 */
5630 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5632 /* update threshold */
5633 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5634 /* update init credit */
5635 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
/* pulse PBF init to latch the new credit values */
5638 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5640 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5643 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
5645 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5646 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5648 if (CHIP_IS_E1(bp)) {
5649 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5650 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5652 bnx2x_init_block(bp, HC_BLOCK, init_stage);
5654 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5655 /* init aeu_mask_attn_func_0/1:
5656 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5657 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5658 * bits 4-7 are used for "per vn group attention" */
5659 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5660 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5662 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5663 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5664 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5665 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5666 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5668 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5670 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5672 if (CHIP_IS_E1H(bp)) {
5673 /* 0x2 disable e1hov, 0x1 enable */
5674 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5675 (IS_E1HMF(bp) ? 0x1 : 0x2));
5678 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5679 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5680 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5684 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5685 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
/* route external-PHY fault signals into the AEU attention groups */
5687 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5688 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5690 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5692 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5693 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5695 /* The GPIO should be swapped if the swap register is
5697 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5698 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5700 /* Select function upon port-swap configuration */
5702 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5703 aeu_gpio_mask = (swap_val && swap_override) ?
5704 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5705 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5707 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5708 aeu_gpio_mask = (swap_val && swap_override) ?
5709 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5710 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5712 val = REG_RD(bp, offset);
5713 /* add GPIO3 to group */
5714 val |= aeu_gpio_mask;
5715 REG_WR(bp, offset, val);
5719 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5720 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
5721 /* add SPIO 5 to group 0 */
5723 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5724 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5725 val = REG_RD(bp, reg_addr);
5726 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5727 REG_WR(bp, reg_addr, val);
5735 bnx2x__link_reset(bp);
/* ILT (internal lookup table) layout: 768 lines split between the two
 * chip functions; CNIC (iSCSI/FCoE offload) reserves extra lines when
 * built in.  NOTE(review): the #ifdef BCM_CNIC / #else lines around the
 * two CNIC_ILT_LINES values are elided from this excerpt. */
5740 #define ILT_PER_FUNC (768/2)
5741 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5742 /* the phys address is shifted right 12 bits and has an added
5743 1=valid bit added to the 53rd bit
5744 then since this is a wide register(TM)
5745 we split it into two 32 bit writes
5747 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5748 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5749 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5750 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5753 #define CNIC_ILT_LINES 127
5754 #define CNIC_CTX_PER_ILT 16
5756 #define CNIC_ILT_LINES 0
/*
 * Write one ILT entry: map ILT line @index to DMA address @addr via the
 * chip-revision-specific ONCHIP_AT wide register (two 32-bit halves,
 * see ONCHIP_ADDR1/ONCHIP_ADDR2 above).
 */
5759 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5763 if (CHIP_IS_E1H(bp))
5764 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5766 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5768 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
/*
 * Per-PCI-function init: enable MSI reconfiguration in the HC, program
 * this function's ILT window (context, timers, QM, searcher T1), point
 * the searcher at the T2 table, run function-stage block init (E1H),
 * and set up the HC.  Returns 0 (some conditionals/#ifdef lines are
 * elided from this excerpt).
 */
5771 static int bnx2x_init_func(struct bnx2x *bp)
5773 int port = BP_PORT(bp);
5774 int func = BP_FUNC(bp);
5778 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
5780 /* set MSI reconfigure capability */
5781 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5782 val = REG_RD(bp, addr);
5783 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5784 REG_WR(bp, addr, val);
/* ILT line 0 of this function: slowpath context */
5786 i = FUNC_ILT_BASE(func);
5788 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5789 if (CHIP_IS_E1H(bp)) {
5790 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5791 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5793 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5794 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
/* next ILT line: timers block backing store */
5797 i += 1 + CNIC_ILT_LINES;
5798 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
5800 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5802 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
5803 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
/* next ILT line: QM queues backing store */
5807 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
5809 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5811 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
5812 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
/* next ILT line: searcher T1 table */
5816 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
5818 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5820 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
5821 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
5824 /* tell the searcher where the T2 table is */
5825 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
5827 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
5828 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
5830 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
5831 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
5832 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
5834 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
5837 if (CHIP_IS_E1H(bp)) {
5838 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5839 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5840 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5841 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5842 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5843 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5844 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5845 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5846 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5848 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5849 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5852 /* HC init per function */
5853 if (CHIP_IS_E1H(bp)) {
5854 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5856 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5857 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5859 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5861 /* Reset PCIE errors for debug */
5862 REG_WR(bp, 0x2114, 0xffffffff);
5863 REG_WR(bp, 0x2120, 0xffffffff);
/*
 * bnx2x_init_hw - run the HW init stage(s) selected by the MCP load code.
 *
 * COMMON initializes chip-wide resources, PORT a single port, FUNCTION a
 * single PCI function.  After the init stage, the current driver-pulse
 * sequence is latched from shmem (unless running without MCP) and all
 * status blocks are zeroed before gunzip resources are released.
 *
 * NOTE(review): this listing is elided (gaps between the embedded line
 * numbers) — the visible statements are not contiguous.
 */
5868 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5872 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5873 BP_FUNC(bp), load_code);
/* dmae_mutex serializes use of the slow-path DMA engine */
5876 mutex_init(&bp->dmae_mutex);
5877 rc = bnx2x_gunzip_init(bp);
5881 switch (load_code) {
5882 case FW_MSG_CODE_DRV_LOAD_COMMON:
5883 rc = bnx2x_init_common(bp);
5888 case FW_MSG_CODE_DRV_LOAD_PORT:
5890 rc = bnx2x_init_port(bp);
5895 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5897 rc = bnx2x_init_func(bp);
5903 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
/* Latch the driver-pulse sequence number so the pulse handshake with
 * the MCP starts from the firmware's current value */
5907 if (!BP_NOMCP(bp)) {
5908 int func = BP_FUNC(bp);
5910 bp->fw_drv_pulse_wr_seq =
5911 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5912 DRV_PULSE_SEQ_MASK);
5913 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5916 /* this needs to be done before gunzip end */
5917 bnx2x_zero_def_sb(bp);
5918 for_each_queue(bp, i)
5919 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5921 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5925 bnx2x_gunzip_end(bp);
/*
 * bnx2x_free_mem - release all DMA-coherent and vmalloc'ed driver memory.
 *
 * Mirror of bnx2x_alloc_mem(): per-queue fastpath status blocks, rx/tx
 * rings, then the slow-path areas (default status block, slowpath struct,
 * searcher T1/T2, timers, QM, CNIC status block, SPQ).  The helper macros
 * pair dma_free_coherent() with the mapping handle (BNX2X_PCI_FREE) and
 * vfree with plain pointers (BNX2X_FREE); their full bodies are elided
 * in this listing.
 */
5930 void bnx2x_free_mem(struct bnx2x *bp)
5933 #define BNX2X_PCI_FREE(x, y, size) \
5936 dma_free_coherent(&bp->pdev->dev, size, x, y); \
5942 #define BNX2X_FREE(x) \
/* fastpath status blocks, one per queue */
5954 for_each_queue(bp, i) {
5957 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5958 bnx2x_fp(bp, i, status_blk_mapping),
5959 sizeof(struct host_status_block));
5962 for_each_queue(bp, i) {
5964 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5965 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5966 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5967 bnx2x_fp(bp, i, rx_desc_mapping),
5968 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5970 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5971 bnx2x_fp(bp, i, rx_comp_mapping),
5972 sizeof(struct eth_fast_path_rx_cqe) *
/* SGE pages used for TPA/jumbo aggregation */
5976 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5977 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5978 bnx2x_fp(bp, i, rx_sge_mapping),
5979 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5982 for_each_queue(bp, i) {
5984 /* fastpath tx rings: tx_buf tx_desc */
5985 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5986 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5987 bnx2x_fp(bp, i, tx_desc_mapping),
5988 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5990 /* end of fastpath */
5992 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5993 sizeof(struct host_def_status_block));
5995 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5996 sizeof(struct bnx2x_slowpath));
/* sizes must match the BNX2X_PCI_ALLOC calls in bnx2x_alloc_mem() */
5999 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6000 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6001 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6002 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6003 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6004 sizeof(struct host_status_block));
6006 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6008 #undef BNX2X_PCI_FREE
/*
 * bnx2x_alloc_mem - allocate all DMA-coherent and vmalloc'ed driver memory.
 *
 * Allocates, per queue: the fastpath status block and the rx/tx rings;
 * then the slow-path areas: default status block, slowpath struct,
 * searcher T1/T2 tables, timer block array, QM queues, CNIC status
 * block and the slow-path queue (SPQ).  On any failure the macros jump
 * to alloc_mem_err (elided here), so partially allocated memory is
 * expected to be released by the error path.
 * Returns 0 on success, negative errno on allocation failure.
 */
6012 int bnx2x_alloc_mem(struct bnx2x *bp)
6015 #define BNX2X_PCI_ALLOC(x, y, size) \
6017 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
6019 goto alloc_mem_err; \
6020 memset(x, 0, size); \
6023 #define BNX2X_ALLOC(x, size) \
6025 x = vmalloc(size); \
6027 goto alloc_mem_err; \
6028 memset(x, 0, size); \
/* back-pointer from each fastpath to the device, plus its status block */
6035 for_each_queue(bp, i) {
6036 bnx2x_fp(bp, i, bp) = bp;
6039 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6040 &bnx2x_fp(bp, i, status_blk_mapping),
6041 sizeof(struct host_status_block));
6044 for_each_queue(bp, i) {
6046 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6047 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6048 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6049 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6050 &bnx2x_fp(bp, i, rx_desc_mapping),
6051 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6053 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6054 &bnx2x_fp(bp, i, rx_comp_mapping),
6055 sizeof(struct eth_fast_path_rx_cqe) *
/* SGE pages used for TPA/jumbo aggregation */
6059 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6060 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6061 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6062 &bnx2x_fp(bp, i, rx_sge_mapping),
6063 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6066 for_each_queue(bp, i) {
6068 /* fastpath tx rings: tx_buf tx_desc */
6069 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6070 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6071 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6072 &bnx2x_fp(bp, i, tx_desc_mapping),
6073 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6075 /* end of fastpath */
6077 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6078 sizeof(struct host_def_status_block));
6080 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6081 sizeof(struct bnx2x_slowpath));
/* searcher T1 table; size must match bnx2x_free_mem() */
6084 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6086 /* allocate searcher T2 table
6087 we allocate 1/4 of alloc num for T2
6088 (which is not entered into the ILT) */
6089 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6091 /* Initialize T2 (for 1024 connections) */
/* each 64-byte T2 entry's last 8 bytes link to the next entry (free list) */
6092 for (i = 0; i < 16*1024; i += 64)
6093 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6095 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6096 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6098 /* QM queues (128*MAX_CONN) */
6099 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6101 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6102 sizeof(struct host_status_block));
6105 /* Slow path ring */
6106 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6114 #undef BNX2X_PCI_ALLOC
6120 * Init service functions
6124 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
6126 * @param bp driver descriptor
6127 * @param set set or clear an entry (1 or 0)
6128 * @param mac pointer to a buffer containing a MAC
6129 * @param cl_bit_vec bit vector of clients to register a MAC for
6130 * @param cam_offset offset in a CAM to use
6131 * @param with_bcast set broadcast MAC as well
/*
 * E1-chip CAM MAC programming: fills the slow-path mac_config command
 * with the primary MAC (and optionally the broadcast MAC) and posts a
 * SET_MAC ramrod.  Completion is waited for by the callers via
 * bnx2x_wait_ramrod(); this function only posts the command.
 */
6133 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
6134 u32 cl_bit_vec, u8 cam_offset,
6137 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6138 int port = BP_PORT(bp);
/* CAM layout (per the original comment):
6141 * unicasts 0-31:port0 32-63:port1
6142 * multicast 64-127:port0 128-191:port1
 */
/* one entry for the MAC, plus one for broadcast if requested */
6144 config->hdr.length = 1 + (with_bcast ? 1 : 0);
6145 config->hdr.offset = cam_offset;
6146 config->hdr.client_id = 0xff;
6147 config->hdr.reserved1 = 0;
/* entry 0: the requested MAC, byte-swapped into 16-bit CAM fields */
6150 config->config_table[0].cam_entry.msb_mac_addr =
6151 swab16(*(u16 *)&mac[0]);
6152 config->config_table[0].cam_entry.middle_mac_addr =
6153 swab16(*(u16 *)&mac[2]);
6154 config->config_table[0].cam_entry.lsb_mac_addr =
6155 swab16(*(u16 *)&mac[4]);
6156 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6158 config->config_table[0].target_table_entry.flags = 0;
/* on clear (!set) the entry is invalidated instead — elided branch */
6160 CAM_INVALIDATE(config->config_table[0]);
6161 config->config_table[0].target_table_entry.clients_bit_vector =
6162 cpu_to_le32(cl_bit_vec);
6163 config->config_table[0].target_table_entry.vlan_id = 0;
6165 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6166 (set ? "setting" : "clearing"),
6167 config->config_table[0].cam_entry.msb_mac_addr,
6168 config->config_table[0].cam_entry.middle_mac_addr,
6169 config->config_table[0].cam_entry.lsb_mac_addr);
/* entry 1 (only when with_bcast): the broadcast address ff:ff:ff:ff:ff:ff */
6173 config->config_table[1].cam_entry.msb_mac_addr =
6174 cpu_to_le16(0xffff);
6175 config->config_table[1].cam_entry.middle_mac_addr =
6176 cpu_to_le16(0xffff);
6177 config->config_table[1].cam_entry.lsb_mac_addr =
6178 cpu_to_le16(0xffff);
6179 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6181 config->config_table[1].target_table_entry.flags =
6182 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6184 CAM_INVALIDATE(config->config_table[1]);
6185 config->config_table[1].target_table_entry.clients_bit_vector =
6186 cpu_to_le32(cl_bit_vec);
6187 config->config_table[1].target_table_entry.vlan_id = 0;
/* hand the command to firmware via the slow-path queue */
6190 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6191 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6192 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6196 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
6198 * @param bp driver descriptor
6199 * @param set set or clear an entry (1 or 0)
6200 * @param mac pointer to a buffer containing a MAC
6201 * @param cl_bit_vec bit vector of clients to register a MAC for
6202 * @param cam_offset offset in a CAM to use
/*
 * E1H-chip CAM MAC programming: single-entry variant of the E1 routine.
 * Fills the E1H mac_config command (which also carries the outer-VLAN
 * id for multi-function mode) and posts a SET_MAC ramrod; callers wait
 * for completion separately.
 */
6204 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
6205 u32 cl_bit_vec, u8 cam_offset)
6207 struct mac_configuration_cmd_e1h *config =
6208 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6210 config->hdr.length = 1;
6211 config->hdr.offset = cam_offset;
6212 config->hdr.client_id = 0xff;
6213 config->hdr.reserved1 = 0;
/* MAC bytes are byte-swapped into the 16-bit CAM fields */
6216 config->config_table[0].msb_mac_addr =
6217 swab16(*(u16 *)&mac[0]);
6218 config->config_table[0].middle_mac_addr =
6219 swab16(*(u16 *)&mac[2]);
6220 config->config_table[0].lsb_mac_addr =
6221 swab16(*(u16 *)&mac[4]);
6222 config->config_table[0].clients_bit_vector =
6223 cpu_to_le32(cl_bit_vec);
6224 config->config_table[0].vlan_id = 0;
/* e1hov: outer VLAN tag assigned to this function in MF mode */
6225 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
/* set vs clear choose different flag values (branch elided in listing) */
6227 config->config_table[0].flags = BP_PORT(bp);
6229 config->config_table[0].flags =
6230 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6232 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
6233 (set ? "setting" : "clearing"),
6234 config->config_table[0].msb_mac_addr,
6235 config->config_table[0].middle_mac_addr,
6236 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
6238 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6239 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6240 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
/*
 * bnx2x_wait_ramrod - wait (or poll) for *state_p to reach 'state'.
 *
 * The state is advanced asynchronously by bnx2x_sp_event() when the
 * ramrod completion arrives; in poll mode the rx path is serviced
 * manually so completions can be consumed without interrupts.
 * Returns 0 when the state is reached, -EBUSY-style error on timeout
 * (exact return value elided from this listing).
 */
6243 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6244 int *state_p, int poll)
6246 /* can take a while if any port is running */
6249 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6250 poll ? "polling" : "waiting", state, idx);
/* poll mode: service rx completions by hand on the default queue... */
6255 bnx2x_rx_int(bp->fp, 10);
6256 /* if index is different from 0
6257 * the reply for some commands will
6258 * be on the non default queue
 */
6261 bnx2x_rx_int(&bp->fp[idx], 10);
/* pair with the write side in bnx2x_sp_event() */
6264 mb(); /* state is changed by bnx2x_sp_event() */
6265 if (*state_p == state) {
6266 #ifdef BNX2X_STOP_ON_ERROR
6267 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt)
6279 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6280 poll ? "polling" : "waiting", state, idx);
6281 #ifdef BNX2X_STOP_ON_ERROR
/*
 * Set (or clear) the primary Ethernet MAC on E1H chips and wait for the
 * SET_MAC ramrod to complete.  CAM offset is the function number; the
 * client bit vector contains only the leading queue's client id.
 */
6288 void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
6290 bp->set_mac_pending++;
6293 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
6294 (1 << bp->fp->cl_id), BP_FUNC(bp));
6296 /* Wait for a completion */
/* poll (last arg 1) when clearing, since interrupts may be down */
6297 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
/*
 * Set (or clear) the primary Ethernet MAC on E1 chips and wait for the
 * SET_MAC ramrod to complete.  CAM offset 0 for port 0, 32 for port 1
 * (E1 unicast CAM layout).
 */
6300 void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
6302 bp->set_mac_pending++;
6305 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
6306 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
6309 /* Wait for a completion */
/* poll (last arg 1) when clearing, since interrupts may be down */
6310 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
6315 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
6316 * MAC(s). This function will wait until the ramrod completion
6319 * @param bp driver handle
6320 * @param set set or clear the CAM entry
6322 * @return 0 if success, -ENODEV if ramrod doesn't return.
/*
 * Program (or clear) the iSCSI L2 MAC in the CAM, just after the
 * Ethernet MAC entries, and wait for the ramrod completion.
 * E1 uses CAM offset (port ? 32 : 0) + 2; E1H uses E1H_FUNC_MAX + func.
 */
6324 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6326 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
6328 bp->set_mac_pending++;
6331 /* Send a SET_MAC ramrod */
/* E1 path (chip-type branch elided in this listing) */
6333 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
6334 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
6337 /* CAM allocation for E1H
6338 * unicasts: by func number
6339 * multicast: 20+FUNC*20, 20 each
 */
6341 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
6342 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
6344 /* Wait for a completion when setting */
6345 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
/*
 * Bring up the leading (default) connection: re-arm the IGU for queue 0,
 * post a PORT_SETUP ramrod and wait for bp->state to become OPEN.
 * Returns the bnx2x_wait_ramrod() result (0 on success).
 */
6351 int bnx2x_setup_leading(struct bnx2x *bp)
6355 /* reset IGU state */
6356 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6359 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6361 /* Wait for completion */
6362 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
/*
 * Bring up a non-default connection: re-arm its IGU, mark the fastpath
 * OPENING, post a CLIENT_SETUP ramrod and wait for the fastpath state
 * to become OPEN.  Returns bnx2x_wait_ramrod() result (0 on success).
 */
6367 int bnx2x_setup_multi(struct bnx2x *bp, int index)
6369 struct bnx2x_fastpath *fp = &bp->fp[index];
6371 /* reset IGU state */
6372 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
/* bnx2x_sp_event() moves state to OPEN on ramrod completion */
6375 fp->state = BNX2X_FP_STATE_OPENING;
6376 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6379 /* Wait for completion */
6380 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
/*
 * Choose bp->num_queues for MSI-X operation based on the configured
 * RSS multi-mode, capped at BNX2X_MAX_QUEUES(bp).  In REGULAR mode the
 * module-parameter queue count is used when set, otherwise the number
 * of online CPUs (default branch elided in this listing).
 */
6385 void bnx2x_set_num_queues_msix(struct bnx2x *bp)
6388 switch (bp->multi_mode) {
6389 case ETH_RSS_MODE_DISABLED:
6393 case ETH_RSS_MODE_REGULAR:
/* explicit 'num_queues' module parameter takes precedence */
6395 bp->num_queues = min_t(u32, num_queues,
6396 BNX2X_MAX_QUEUES(bp));
6398 bp->num_queues = min_t(u32, num_online_cpus(),
6399 BNX2X_MAX_QUEUES(bp));
/*
 * Tear down a non-default connection in two synchronous steps:
 * HALT ramrod (wait for HALTED), then CFC_DEL ramrod (wait for CLOSED).
 * Returns 0 on success or the wait_ramrod timeout error.
 */
6411 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6413 struct bnx2x_fastpath *fp = &bp->fp[index];
6416 /* halt the connection */
6417 fp->state = BNX2X_FP_STATE_HALTING;
6418 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
6420 /* Wait for completion */
6421 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6423 if (rc) /* timeout */
/* delete the connection's CFC entry only after a clean halt */
6426 /* delete cfc entry */
6427 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6429 /* Wait for completion */
6430 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
/*
 * Tear down the leading (default) connection: HALT ramrod (wait for
 * HALTED, in poll mode), then PORT_DEL.  The PORT_DEL completion is
 * detected by watching the default status block's slow-path producer
 * advance rather than via bnx2x_wait_ramrod(); a timeout there is
 * tolerated because the chip is reset right afterwards.
 */
6435 static int bnx2x_stop_leading(struct bnx2x *bp)
6437 __le16 dsb_sp_prod_idx;
6438 /* if the other port is handling traffic,
6439 this can take a lot of time */
6445 /* Send HALT ramrod */
6446 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6447 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
6449 /* Wait for completion */
6450 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6451 &(bp->fp[0].state), 1);
6452 if (rc) /* timeout */
/* snapshot the producer so we can see it move after PORT_DEL */
6455 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6457 /* Send PORT_DELETE ramrod */
6458 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6460 /* Wait for completion to arrive on default status block
6461 we are going to reset the chip anyway
6462 so there is not much to do if this times out
 */
6464 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6466 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6467 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6468 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6469 #ifdef BNX2X_STOP_ON_ERROR
6477 rmb(); /* Refresh the dsb_sp_prod */
6479 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6480 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
/*
 * Per-function HW reset: mask HC leading/trailing edges, stop the timer
 * scan (waiting 10ms–2s for it to drain), then clear this function's
 * ILT range.
 */
6485 static void bnx2x_reset_func(struct bnx2x *bp)
6487 int port = BP_PORT(bp);
6488 int func = BP_FUNC(bp);
/* mask attention edges for this port's host coalescing block */
6492 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6493 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6496 /* Disable Timer scan */
6497 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6499 * Wait for at least 10ms and up to 2 second for the timers scan to
/* poll until the linear-0 scan bit clears (delay per iteration elided) */
6502 for (i = 0; i < 200; i++) {
6504 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
/* Clear ILT: wipe every ILT line owned by this function */
6509 base = FUNC_ILT_BASE(func);
6510 for (i = base; i < base + ILT_PER_FUNC; i++)
6511 bnx2x_ilt_wr(bp, i, 0);
/*
 * Per-port HW reset: mask NIG interrupts, stop forwarding rx traffic to
 * the BRB (except MCP-bound packets), mask AEU attentions, then verify
 * the BRB has drained and log if blocks are still occupied.
 */
6514 static void bnx2x_reset_port(struct bnx2x *bp)
6516 int port = BP_PORT(bp);
6519 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6521 /* Do not rcv packets to BRB */
6522 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6523 /* Do not direct rcv packets that are not for MCP to the BRB */
6524 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6525 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
/* mask all AEU attentions for this port */
6528 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6531 /* Check for BRB port occupancy */
6532 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
/* non-zero occupancy after the drain delay is only logged, not fatal */
6534 DP(NETIF_MSG_IFDOWN,
6535 "BRB1 is not empty %d blocks are occupied\n", val);
6537 /* TODO: Close Doorbell port? */
/*
 * Dispatch the unload-scope reset requested by the MCP: COMMON resets
 * port + function + common blocks, PORT resets port + function,
 * FUNCTION resets only the function.  Cases fall through conceptually
 * from wider to narrower scope.
 */
6540 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6542 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6543 BP_FUNC(bp), reset_code);
6545 switch (reset_code) {
6546 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6547 bnx2x_reset_port(bp);
6548 bnx2x_reset_func(bp);
6549 bnx2x_reset_common(bp);
6552 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6553 bnx2x_reset_port(bp);
6554 bnx2x_reset_func(bp);
6557 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6558 bnx2x_reset_func(bp);
6562 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
/*
 * bnx2x_chip_cleanup - orderly chip shutdown on unload.
 *
 * Sequence: drain tx fastpath work; clear unicast/multicast MAC CAM
 * entries (chip-specific paths for E1 vs E1H); clear the iSCSI L2 MAC
 * under cnic_mutex; pick the unload request code according to WoL
 * configuration (programming the WoL MAC into EMAC match registers when
 * WoL is enabled); halt all connections synchronously; negotiate the
 * final reset scope with the MCP (or via local load counters when no
 * MCP); reset the link when scope is COMMON/PORT; reset the chip and
 * report UNLOAD_DONE.
 *
 * NOTE(review): listing is elided — error paths, 'else' branches and
 * some declarations are not visible here.
 */
6567 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
6569 int port = BP_PORT(bp);
6573 /* Wait until tx fastpath tasks complete */
6574 for_each_queue(bp, i) {
6575 struct bnx2x_fastpath *fp = &bp->fp[i];
6578 while (bnx2x_has_tx_work_unload(fp)) {
6582 BNX2X_ERR("timeout waiting for queue[%d]\n",
6584 #ifdef BNX2X_STOP_ON_ERROR
6595 /* Give HW time to discard old tx messages */
/* E1: clear the unicast MAC then invalidate every multicast CAM entry */
6598 if (CHIP_IS_E1(bp)) {
6599 struct mac_configuration_cmd *config =
6600 bnx2x_sp(bp, mcast_config);
6602 bnx2x_set_eth_mac_addr_e1(bp, 0);
6604 for (i = 0; i < config->hdr.length; i++)
6605 CAM_INVALIDATE(config->config_table[i]);
6607 config->hdr.length = i;
6608 if (CHIP_REV_IS_SLOW(bp))
6609 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6611 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6612 config->hdr.client_id = bp->fp->cl_id;
6613 config->hdr.reserved1 = 0;
6615 bp->set_mac_pending++;
6618 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6619 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6620 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
/* E1H: disable LLH, clear the MAC and wipe the MC hash registers */
6623 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6625 bnx2x_set_eth_mac_addr_e1h(bp, 0);
6627 for (i = 0; i < MC_HASH_SIZE; i++)
6628 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6630 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6633 /* Clear iSCSI L2 MAC */
6634 mutex_lock(&bp->cnic_mutex);
6635 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6636 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6637 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6639 mutex_unlock(&bp->cnic_mutex);
/* Select the unload request code based on WoL state */
6642 if (unload_mode == UNLOAD_NORMAL)
6643 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6645 else if (bp->flags & NO_WOL_FLAG)
6646 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
/* WoL enabled: program the MAC into the EMAC match registers */
6649 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6650 u8 *mac_addr = bp->dev->dev_addr;
6652 /* The mac address is written to entries 1-4 to
6653 preserve entry 0 which is used by the PMF */
6654 u8 entry = (BP_E1HVN(bp) + 1)*8;
6656 val = (mac_addr[0] << 8) | mac_addr[1];
6657 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6659 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6660 (mac_addr[4] << 8) | mac_addr[5];
6661 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6663 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6666 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6668 /* Close multi and leading connections
6669 Completions for ramrods are collected in a synchronous way */
6670 for_each_nondefault_queue(bp, i)
6671 if (bnx2x_stop_multi(bp, i))
6674 rc = bnx2x_stop_leading(bp);
6676 BNX2X_ERR("Stop leading failed!\n");
6677 #ifdef BNX2X_STOP_ON_ERROR
/* MCP present: firmware decides the final reset scope */
6686 reset_code = bnx2x_fw_command(bp, reset_code);
/* no MCP: derive the scope from the shared load counters */
6688 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
6689 load_count[0], load_count[1], load_count[2]);
6691 load_count[1 + port]--;
6692 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
6693 load_count[0], load_count[1], load_count[2]);
6694 if (load_count[0] == 0)
6695 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6696 else if (load_count[1 + port] == 0)
6697 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6699 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6702 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6703 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6704 bnx2x__link_reset(bp);
6706 /* Reset the chip */
6707 bnx2x_reset_chip(bp, reset_code);
6709 /* Report UNLOAD_DONE to MCP */
6711 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
/*
 * Disable the "close the gates" behavior used by parity recovery.
 * E1: clears bits in the per-port AEU attention mask (mask value
 * elided from this listing).  E1H: clears the PXP/NIG close-mask bits
 * in the general AEU mask register.
 */
6715 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
6719 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6721 if (CHIP_IS_E1(bp)) {
6722 int port = BP_PORT(bp);
6723 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6724 MISC_REG_AEU_MASK_ATTN_FUNC_0;
/* read-modify-write; the mask applied to val is elided here */
6726 val = REG_RD(bp, addr);
6728 REG_WR(bp, addr, val);
6729 } else if (CHIP_IS_E1H(bp)) {
6730 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6731 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6732 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6733 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6738 /* Close gates #2, #3 and #4: */
/*
 * Close (close=true) or open (close=false) traffic gates before/after a
 * "process kill" chip reset.  Gate #4 = PXP doorbell discard, gate #2 =
 * PXP internal-write discard (both E1H-only), gate #3 = HC config bit
 * (note the inverted sense: bit set when opening).
 */
6739 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6743 /* Gates #2 and #4a are closed/opened for "not E1" only */
6744 if (!CHIP_IS_E1(bp)) {
/* gate: discard doorbells arriving from the host */
6746 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
6747 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
6748 close ? (val | 0x1) : (val & (~(u32)1)));
/* gate: discard internal writes from the host */
6750 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
6751 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
6752 close ? (val | 0x1) : (val & (~(u32)1)));
/* gate #3: HC config bit is set when OPENING (inverted vs. the others) */
6756 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
6757 val = REG_RD(bp, addr);
6758 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
6760 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
6761 close ? "closing" : "opening");
6765 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
/*
 * Save the current CLP `magic' bit into *magic_val and then set it in
 * the shared MF config, so the MF configuration survives the MCP reset.
 * Paired with bnx2x_clp_reset_done().
 */
6767 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
6769 /* Do some magic... */
6770 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6771 *magic_val = val & SHARED_MF_CLP_MAGIC;
6772 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
6775 /* Restore the value of the `magic' bit.
6777 * @param bp driver handle
6778 * @param magic_val Old value of the `magic' bit.
/*
 * Restore the CLP `magic' bit saved by bnx2x_clp_reset_prep() after the
 * MCP reset has completed.
 */
6780 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
6782 /* Restore the `magic' bit value... */
6783 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
6784 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
6785 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
6786 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6787 MF_CFG_WR(bp, shared_mf_config.clp_mb,
6788 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
6791 /* Prepares for MCP reset: takes care of CLP configurations.
6794 * @param magic_val Old value of 'magic' bit.
 */
/*
 * Save the CLP magic bit (non-E1 only) and invalidate the shmem
 * validity map so the MCP re-initializes it when it comes back up.
 */
6796 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
6799 u32 validity_offset;
6801 DP(NETIF_MSG_HW, "Starting\n");
6803 /* Set `magic' bit in order to save MF config */
6804 if (!CHIP_IS_E1(bp))
6805 bnx2x_clp_reset_prep(bp, magic_val);
6807 /* Get shmem offset */
6808 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6809 validity_offset = offsetof(struct shmem_region, validity_map[0]);
6811 /* Clear validity map flags */
/* zero-shmem guard is elided in this listing */
6813 REG_WR(bp, shmem + validity_offset, 0);
6816 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
6817 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
6819 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
6820 * depending on the HW type.
 */
/* One MCP-polling delay tick: 100ms on real silicon, 1s on emu/FPGA. */
6824 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
6826 /* special handling for emulation and FPGA,
6827 wait 10 times longer */
6828 if (CHIP_REV_IS_SLOW(bp))
6829 msleep(MCP_ONE_TIMEOUT*10);
6831 msleep(MCP_ONE_TIMEOUT);
/*
 * Complete an MCP reset: poll the shmem validity map (up to MCP_TIMEOUT)
 * until DEV_INFO|MB signatures reappear — this indicates the MCP has
 * rebooted — then restore the CLP magic bit saved earlier.
 * Returns 0 on success, error if shmem is missing or the MCP never
 * comes back (exact error values elided from this listing).
 */
6834 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
6836 u32 shmem, cnt, validity_offset, val;
6841 /* Get shmem offset */
6842 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6844 BNX2X_ERR("Shmem 0 return failure\n");
6849 validity_offset = offsetof(struct shmem_region, validity_map[0]);
6851 /* Wait for MCP to come up */
6852 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
6853 /* TBD: its best to check validity map of last port.
6854 * currently checks on port 0.
 */
6856 val = REG_RD(bp, shmem + validity_offset);
6857 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
6858 shmem + validity_offset, val);
6860 /* check that shared memory is valid. */
6861 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6862 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
/* not valid yet — sleep one tick and retry */
6865 bnx2x_mcp_wait_one(bp);
6868 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
6870 /* Check that shared memory is valid. This indicates that MCP is up. */
6871 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
6872 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
6873 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
6879 /* Restore the `magic' bit value */
6880 if (!CHIP_IS_E1(bp))
6881 bnx2x_clp_reset_done(bp, magic_val);
/*
 * Prepare PXP2 for chip reset (non-E1 only): clear the read-start-init
 * and the RBC/CFG done indications so PXP2 re-initializes cleanly.
 */
6886 static void bnx2x_pxp_prep(struct bnx2x *bp)
6888 if (!CHIP_IS_E1(bp)) {
6889 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
6890 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
6891 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
6897 * Reset the whole chip except for:
6899 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
6902 * - MISC (including AEU)
/*
 * Issue the "process kill" global reset: assert reset on every block
 * except the ones listed above that must survive (HC, PXP glue, MDIO,
 * EMAC hard cores, MISC core, RBCN, GRC, MCP).  The CLEAR writes keep
 * the excluded blocks out of reset; the SET writes then assert reset on
 * everything selected by the masks.
 */
6906 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
6908 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
/* reg 1: blocks that must NOT be reset */
6911 MISC_REGISTERS_RESET_REG_1_RST_HC |
6912 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
6913 MISC_REGISTERS_RESET_REG_1_RST_PXP;
/* reg 2: blocks that must NOT be reset */
6916 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
6917 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
6918 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
6919 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
6920 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
6921 MISC_REGISTERS_RESET_REG_2_RST_GRC |
6922 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
6923 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
6925 reset_mask1 = 0xffffffff;
/* reg-2 width differs by chip (branch elided in this listing) */
6928 reset_mask2 = 0xffff;
6930 reset_mask2 = 0x1ffff;
6932 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6933 reset_mask1 & (~not_reset_mask1));
6934 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6935 reset_mask2 & (~not_reset_mask2));
/* take the selected blocks out of reset again */
6940 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
6941 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
/*
 * bnx2x_process_kill - last-resort full chip recovery after a parity
 * error.
 *
 * Steps: wait up to ~1s for the PXP2 "Tetris buffer" and read queues to
 * drain; close gates #2-#4; clear the "unprepared" bit; prepare the MCP
 * and PXP for reset; issue the global chip reset; wait for the MCP to
 * come back; finally reopen the gates.  Returns 0 on success, non-zero
 * if the chip never drained or the MCP did not recover (error paths
 * elided from this listing).
 */
6945 static int bnx2x_process_kill(struct bnx2x *bp)
6949 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
6952 /* Empty the Tetris buffer, wait for 1s */
6954 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
6955 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
6956 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
6957 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
6958 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
/* idle signature: expected free-count values plus both ports idle */
6959 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
6960 ((port_is_idle_0 & 0x1) == 0x1) &&
6961 ((port_is_idle_1 & 0x1) == 0x1) &&
6962 (pgl_exp_rom2 == 0xffffffff))
6965 } while (cnt-- > 0);
6968 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
6970 " outstanding read requests after 1s!\n");
6971 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
6972 " port_is_idle_0=0x%08x,"
6973 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
6974 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
6981 /* Close gates #2, #3 and #4 */
6982 bnx2x_set_234_gates(bp, true);
6984 /* TBD: Indicate that "process kill" is in progress to MCP */
6986 /* Clear "unprepared" bit */
6987 REG_WR(bp, MISC_REG_UNPREPARED, 0);
6990 /* Make sure all is written to the chip before the reset */
6993 /* Wait for 1ms to empty GLUE and PCI-E core queues,
6994 * PSWHST, GRC and PSWRD Tetris buffer.
 */
6998 /* Prepare to chip reset: */
/* MCP prep saves the CLP magic bit into val for later restore */
7000 bnx2x_reset_mcp_prep(bp, &val);
7006 /* reset the chip */
7007 bnx2x_process_kill_chip_reset(bp);
7010 /* Recover after reset: */
/* MCP comeback is mandatory — bail out on failure */
7012 if (bnx2x_reset_mcp_comp(bp, val))
7018 /* Open the gates #2, #3 and #4 */
7019 bnx2x_set_234_gates(bp, false);
7021 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7022 * reset state, re-enable attentions. */
/*
 * Recovery-leader path: run "process kill"; on success clear the global
 * "reset in progress" flag, mark recovery DONE and drop the leader HW
 * lock (RESERVED_08).  Returns 0 on success, error if process kill
 * failed (power cycle then required).
 */
7027 static int bnx2x_leader_reset(struct bnx2x *bp)
7030 /* Try to recover after the failure */
7031 if (bnx2x_process_kill(bp)) {
7032 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
7035 goto exit_leader_reset;
7038 /* Clear "reset is in progress" bit and update the driver state */
7039 bnx2x_set_reset_done(bp);
7040 bp->recovery_state = BNX2X_RECOVERY_DONE;
/* release leadership even on the failure path (label elided) */
7044 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7049 /* Assumption: runs under rtnl lock. This together with the fact
7050 * that it's called only from bnx2x_reset_task() ensure that it
7051 * will never be called when netif_running(bp->dev) is false.
 */
/*
 * Parity-recovery state machine, driven repeatedly from the reset task:
 *
 *  INIT:  try to become the recovery leader (RESERVED_08 HW lock),
 *         unload the NIC and move to WAIT.
 *  WAIT (leader): once all other functions report unloaded (load
 *         counter), run bnx2x_leader_reset() and reload; until then
 *         re-schedule the reset task.  On failure the device is
 *         detached and powered down — a power cycle is required.
 *  WAIT (non-leader): if reset is still in progress, try to take over
 *         leadership (the old leader may have gone away); otherwise
 *         reload and mark recovery DONE.
 */
7053 static void bnx2x_parity_recover(struct bnx2x *bp)
7055 DP(NETIF_MSG_HW, "Handling parity\n");
7057 switch (bp->recovery_state) {
7058 case BNX2X_RECOVERY_INIT:
7059 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7060 /* Try to get a LEADER_LOCK HW lock */
7061 if (bnx2x_trylock_hw_lock(bp,
7062 HW_LOCK_RESOURCE_RESERVED_08))
7065 /* Stop the driver */
7066 /* If interface has been removed - break */
7067 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7070 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7071 /* Ensure "is_leader" and "recovery_state"
7072 * update values are seen on other CPUs
 */
7077 case BNX2X_RECOVERY_WAIT:
7078 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7079 if (bp->is_leader) {
7080 u32 load_counter = bnx2x_get_load_cnt(bp);
7082 /* Wait until all other functions get
7085 schedule_delayed_work(&bp->reset_task,
7089 /* If all other functions got down -
7090 * try to bring the chip back to
7091 * normal. In any case it's an exit
7092 * point for a leader.
 */
7094 if (bnx2x_leader_reset(bp) ||
7095 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7096 printk(KERN_ERR"%s: Recovery "
7097 "has failed. Power cycle is "
7098 "needed.\n", bp->dev->name);
7099 /* Disconnect this device */
7100 netif_device_detach(bp->dev);
7101 /* Block ifup for all function
7102 * of this ASIC until
7103 * "process kill" or power
 */
7106 bnx2x_set_reset_in_progress(bp);
7107 /* Shut down the power */
7108 bnx2x_set_power_state(bp,
7115 } else { /* non-leader */
7116 if (!bnx2x_reset_is_done(bp)) {
7117 /* Try to get a LEADER_LOCK HW lock as
7118 * long as a former leader may have
7119 * been unloaded by the user or
7120 * released a leadership by another
 */
7123 if (bnx2x_trylock_hw_lock(bp,
7124 HW_LOCK_RESOURCE_RESERVED_08)) {
7125 /* I'm a leader now! Restart a
 */
7132 schedule_delayed_work(&bp->reset_task,
7136 } else { /* A leader has completed
7137 * the "process kill". It's an exit
7138 * point for a non-leader.
 */
7140 bnx2x_nic_load(bp, LOAD_NORMAL);
7141 bp->recovery_state =
7142 BNX2X_RECOVERY_DONE;
7153 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7154 * scheduled on a general queue in order to prevent a dead lock.
 */
/*
 * Delayed-work handler for driver reset.  When parity recovery is in
 * progress it runs the recovery state machine; otherwise it performs a
 * plain unload/reload cycle.  Compiled out (with an error message) when
 * BNX2X_STOP_ON_ERROR is defined so crash state stays intact for dumps.
 */
7156 static void bnx2x_reset_task(struct work_struct *work)
7158 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
7160 #ifdef BNX2X_STOP_ON_ERROR
7161 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7162 " so reset not done to allow debug dump,\n"
7163 KERN_ERR " you will need to reboot when done\n");
/* interface may have been brought down since the work was queued */
7169 if (!netif_running(bp->dev))
7170 goto reset_task_exit;
7172 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7173 bnx2x_parity_recover(bp);
7175 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7176 bnx2x_nic_load(bp, LOAD_NORMAL);
7183 /* end of nic load/unload */
7188 * Init service functions
/*
 * Map a function index (0-7) to its PXP2 PGL "pretend" register, used
 * to issue GRC accesses on behalf of another function.  Logs and
 * returns an error value (elided here) for out-of-range indices.
 */
7191 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7194 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7195 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7196 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7197 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7198 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7199 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7200 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7201 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7203 BNX2X_ERR("Unsupported function index: %d\n", func);
/*
 * Disable interrupts left enabled by a UNDI/boot driver on E1H: pretend
 * to be function 0 via the PGL pretend register, disable interrupts in
 * that context, then restore the pretend register to the original
 * function.  Verifies each pretend-register update with a read-back.
 */
7208 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7210 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7212 /* Flush all outstanding writes */
7215 /* Pretend to be function 0 */
7217 /* Flush the GRC transaction (in the chip) */
7218 new_val = REG_RD(bp, reg);
7220 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7225 /* From now we are in the "like-E1" mode */
7226 bnx2x_int_disable(bp);
7228 /* Flush all outstanding writes */
7231 /* Restore the original function settings */
7232 REG_WR(bp, reg, orig_func);
7233 new_val = REG_RD(bp, reg);
7234 if (new_val != orig_func) {
7235 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7236 orig_func, new_val);
/*
 * Disable chip interrupts during UNDI unload, dispatching to the E1H
 * pretend-register dance for E1H silicon and the plain disable otherwise.
 */
7241 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7243 if (CHIP_IS_E1H(bp))
7244 bnx2x_undi_int_disable_e1h(bp, func);
7246 bnx2x_int_disable(bp);
/*
 * Probe-time cleanup: if a pre-boot UNDI (PXE) driver left the device
 * initialized (detected via DORQ_REG_NORM_CID_OFST == 0x7 while the chip
 * is "unprepared"), quiesce it: request firmware unload on both ports if
 * needed, disable interrupts, block RX into the BRB, reset most chip
 * blocks while preserving NIG port-swap straps, and finally tell the MCP
 * the unload is done. Serialized against other functions via the UNDI
 * hardware lock.
 * NOTE(review): interior lines (conditions, mmiowb/msleep calls, several
 * REG_WRs) are elided from this listing.
 */
7249 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7253 /* Check if there is any driver already loaded */
7254 val = REG_RD(bp, MISC_REG_UNPREPARED);
7256 /* Check if it is the UNDI driver
7257 * UNDI driver initializes CID offset for normal bell to 0x7
7259 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7260 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7262 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7264 int func = BP_FUNC(bp);
7268 /* clear the UNDI indication */
7269 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7271 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7273 /* try unload UNDI on port 0 */
/* Resync driver/firmware mailbox sequence number before the command. */
7276 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7277 DRV_MSG_SEQ_NUMBER_MASK);
7278 reset_code = bnx2x_fw_command(bp, reset_code);
7280 /* if UNDI is loaded on the other port */
7281 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7283 /* send "DONE" for previous unload */
7284 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7286 /* unload UNDI on port 1 */
7289 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7290 DRV_MSG_SEQ_NUMBER_MASK);
7291 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7293 bnx2x_fw_command(bp, reset_code);
7296 /* now it's safe to release the lock */
7297 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7299 bnx2x_undi_int_disable(bp, func);
7301 /* close input traffic and wait for it */
7302 /* Do not rcv packets to BRB */
7304 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7305 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7306 /* Do not direct rcv packets that are not for MCP to
7309 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7310 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
/* Mask AEU attentions for this port while the blocks are reset. */
7313 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7314 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7317 /* save NIG port swap info */
7318 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7319 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
/* Assert reset on the chip blocks (CLEAR registers). */
7322 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7325 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7327 /* take the NIG out of reset and restore swap values */
7329 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7330 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7331 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7332 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7334 /* send unload done to the MCP */
7335 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7337 /* restore our func and fw_seq */
7340 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7341 DRV_MSG_SEQ_NUMBER_MASK);
7344 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
/*
 * Probe-time discovery of chip-wide (port/function independent) hardware
 * info: chip id/revision, single- vs dual-port strap, flash size, shared
 * memory (shmem) bases, MCP validity, shared hw/feature config, bootcode
 * version (warn if older than BNX2X_BC_VER), WoL capability (E1HVN 0
 * only), and the board part number. Results land in bp->common,
 * bp->link_params and bp->flags.
 * NOTE(review): some interior lines are elided from this listing.
 */
7348 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7350 u32 val, val2, val3, val4, id;
7353 /* Get the chip revision id and number. */
7354 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7355 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7356 id = ((val & 0xffff) << 16);
7357 val = REG_RD(bp, MISC_REG_CHIP_REV);
7358 id |= ((val & 0xf) << 12);
7359 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7360 id |= ((val & 0xff) << 4);
7361 val = REG_RD(bp, MISC_REG_BOND_ID);
7363 bp->common.chip_id = id;
7364 bp->link_params.chip_id = bp->common.chip_id;
7365 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
/* Detect single-port boards: odd chip id, or port-swap strap pattern. */
7367 val = (REG_RD(bp, 0x2874) & 0x55);
7368 if ((bp->common.chip_id & 0x1) ||
7369 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7370 bp->flags |= ONE_PORT_FLAG;
7371 BNX2X_DEV_INFO("single port device\n");
7374 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7375 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7376 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7377 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7378 bp->common.flash_size, bp->common.flash_size);
7380 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7381 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
7382 bp->link_params.shmem_base = bp->common.shmem_base;
7383 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7384 bp->common.shmem_base, bp->common.shmem2_base);
/* shmem base outside [0xA0000, 0xC0000) means no live MCP firmware. */
7386 if (!bp->common.shmem_base ||
7387 (bp->common.shmem_base < 0xA0000) ||
7388 (bp->common.shmem_base >= 0xC0000)) {
7389 BNX2X_DEV_INFO("MCP not active\n");
7390 bp->flags |= NO_MCP_FLAG;
7394 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7395 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7396 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7397 BNX2X_ERROR("BAD MCP validity signature\n");
7399 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7400 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7402 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7403 SHARED_HW_CFG_LED_MODE_MASK) >>
7404 SHARED_HW_CFG_LED_MODE_SHIFT);
7406 bp->link_params.feature_config_flags = 0;
7407 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7408 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7409 bp->link_params.feature_config_flags |=
7410 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7412 bp->link_params.feature_config_flags &=
7413 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7415 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7416 bp->common.bc_ver = val;
7417 BNX2X_DEV_INFO("bc_ver %X\n", val);
7418 if (val < BNX2X_BC_VER) {
7419 /* for now only warn
7420 * later we might need to enforce this */
7421 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
7422 "please upgrade BC\n", BNX2X_BC_VER, val);
7424 bp->link_params.feature_config_flags |=
7425 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
7426 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
/* Only virtual function 0 of an E1H physical port can wake the system. */
7428 if (BP_E1HVN(bp) == 0) {
7429 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7430 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7432 /* no WOL capability for E1HVN != 0 */
7433 bp->flags |= NO_WOL_FLAG;
7435 BNX2X_DEV_INFO("%sWoL capable\n",
7436 (bp->flags & NO_WOL_FLAG) ? "not " : "");
/* Part number is stored as four consecutive 32-bit shmem words. */
7438 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7439 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7440 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7441 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7443 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7444 val, val2, val3, val4);
/*
 * Build bp->port.supported (ethtool SUPPORTED_* bitmask) from the NVRAM
 * switch configuration: 1G SerDes vs 10G XGXS path, then per external
 * PHY type. Also reads the PHY MDIO address from the NIG and finally
 * masks the supported set down by the NVRAM speed_cap_mask.
 * NOTE(review): several lines per case (SUPPORTED_TP/FIBRE/Autoneg/Pause
 * entries, break statements, switch headers) are elided from this
 * listing; numbers in the left column are original file lines.
 */
7447 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7450 int port = BP_PORT(bp);
7453 switch (switch_cfg) {
7455 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7458 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7459 switch (ext_phy_type) {
7460 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7461 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7464 bp->port.supported |= (SUPPORTED_10baseT_Half |
7465 SUPPORTED_10baseT_Full |
7466 SUPPORTED_100baseT_Half |
7467 SUPPORTED_100baseT_Full |
7468 SUPPORTED_1000baseT_Full |
7469 SUPPORTED_2500baseX_Full |
7474 SUPPORTED_Asym_Pause);
7477 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7478 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7481 bp->port.supported |= (SUPPORTED_10baseT_Half |
7482 SUPPORTED_10baseT_Full |
7483 SUPPORTED_100baseT_Half |
7484 SUPPORTED_100baseT_Full |
7485 SUPPORTED_1000baseT_Full |
7490 SUPPORTED_Asym_Pause);
7494 BNX2X_ERR("NVRAM config error. "
7495 "BAD SerDes ext_phy_config 0x%x\n",
7496 bp->link_params.ext_phy_config);
/* MDIO address of the SerDes PHY, from the NIG strap register. */
7500 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7502 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7505 case SWITCH_CFG_10G:
7506 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7509 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7510 switch (ext_phy_type) {
7511 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7512 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7515 bp->port.supported |= (SUPPORTED_10baseT_Half |
7516 SUPPORTED_10baseT_Full |
7517 SUPPORTED_100baseT_Half |
7518 SUPPORTED_100baseT_Full |
7519 SUPPORTED_1000baseT_Full |
7520 SUPPORTED_2500baseX_Full |
7521 SUPPORTED_10000baseT_Full |
7526 SUPPORTED_Asym_Pause);
7529 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7530 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7533 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7534 SUPPORTED_1000baseT_Full |
7538 SUPPORTED_Asym_Pause);
7541 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7542 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7545 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7546 SUPPORTED_2500baseX_Full |
7547 SUPPORTED_1000baseT_Full |
7551 SUPPORTED_Asym_Pause);
7554 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7555 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7558 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7561 SUPPORTED_Asym_Pause);
7564 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7565 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7568 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7569 SUPPORTED_1000baseT_Full |
7572 SUPPORTED_Asym_Pause);
7575 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7576 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7579 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7580 SUPPORTED_1000baseT_Full |
7584 SUPPORTED_Asym_Pause);
7587 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
7588 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
7591 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7592 SUPPORTED_1000baseT_Full |
7596 SUPPORTED_Asym_Pause);
7599 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7600 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7603 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7607 SUPPORTED_Asym_Pause);
7610 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7611 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7614 bp->port.supported |= (SUPPORTED_10baseT_Half |
7615 SUPPORTED_10baseT_Full |
7616 SUPPORTED_100baseT_Half |
7617 SUPPORTED_100baseT_Full |
7618 SUPPORTED_1000baseT_Full |
7619 SUPPORTED_10000baseT_Full |
7623 SUPPORTED_Asym_Pause);
7626 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7627 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7628 bp->link_params.ext_phy_config);
7632 BNX2X_ERR("NVRAM config error. "
7633 "BAD XGXS ext_phy_config 0x%x\n",
7634 bp->link_params.ext_phy_config);
/* MDIO address of the XGXS PHY, from the NIG strap register. */
7638 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7640 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7645 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7646 bp->port.link_config);
7649 bp->link_params.phy_addr = bp->port.phy_addr;
7651 /* mask what we support according to speed_cap_mask */
7652 if (!(bp->link_params.speed_cap_mask &
7653 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7654 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7656 if (!(bp->link_params.speed_cap_mask &
7657 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7658 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7660 if (!(bp->link_params.speed_cap_mask &
7661 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7662 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7664 if (!(bp->link_params.speed_cap_mask &
7665 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7666 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7668 if (!(bp->link_params.speed_cap_mask &
7669 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7670 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7671 SUPPORTED_1000baseT_Full);
7673 if (!(bp->link_params.speed_cap_mask &
7674 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7675 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7677 if (!(bp->link_params.speed_cap_mask &
7678 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7679 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7681 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
/*
 * Translate the NVRAM-requested link speed/duplex (bp->port.link_config)
 * into bp->link_params.req_line_speed / req_duplex and the ethtool
 * ADVERTISED_* mask in bp->port.advertising. Each requested fixed speed
 * is validated against bp->port.supported (built by
 * bnx2x_link_settings_supported); an unsupported request logs an NVRAM
 * config error and, at the bottom, falls back to autoneg. Also derives
 * the requested flow control, forcing NONE when AUTO is requested but
 * autoneg is not supported.
 * NOTE(review): lines elided from this listing include braces, break
 * statements and the ADVERTISED_TP/Autoneg companions of each mask.
 */
7684 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7686 bp->link_params.req_duplex = DUPLEX_FULL;
7688 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7689 case PORT_FEATURE_LINK_SPEED_AUTO:
7690 if (bp->port.supported & SUPPORTED_Autoneg) {
7691 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7692 bp->port.advertising = bp->port.supported;
7695 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7697 if ((ext_phy_type ==
7698 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7700 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7701 /* force 10G, no AN */
7702 bp->link_params.req_line_speed = SPEED_10000;
7703 bp->port.advertising =
7704 (ADVERTISED_10000baseT_Full |
7708 BNX2X_ERR("NVRAM config error. "
7709 "Invalid link_config 0x%x"
7710 " Autoneg not supported\n",
7711 bp->port.link_config);
7716 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7717 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7718 bp->link_params.req_line_speed = SPEED_10;
7719 bp->port.advertising = (ADVERTISED_10baseT_Full |
7722 BNX2X_ERROR("NVRAM config error. "
7723 "Invalid link_config 0x%x"
7724 " speed_cap_mask 0x%x\n",
7725 bp->port.link_config,
7726 bp->link_params.speed_cap_mask);
7731 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7732 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7733 bp->link_params.req_line_speed = SPEED_10;
7734 bp->link_params.req_duplex = DUPLEX_HALF;
7735 bp->port.advertising = (ADVERTISED_10baseT_Half |
7738 BNX2X_ERROR("NVRAM config error. "
7739 "Invalid link_config 0x%x"
7740 " speed_cap_mask 0x%x\n",
7741 bp->port.link_config,
7742 bp->link_params.speed_cap_mask);
7747 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7748 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7749 bp->link_params.req_line_speed = SPEED_100;
7750 bp->port.advertising = (ADVERTISED_100baseT_Full |
7753 BNX2X_ERROR("NVRAM config error. "
7754 "Invalid link_config 0x%x"
7755 " speed_cap_mask 0x%x\n",
7756 bp->port.link_config,
7757 bp->link_params.speed_cap_mask);
7762 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7763 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7764 bp->link_params.req_line_speed = SPEED_100;
7765 bp->link_params.req_duplex = DUPLEX_HALF;
7766 bp->port.advertising = (ADVERTISED_100baseT_Half |
7769 BNX2X_ERROR("NVRAM config error. "
7770 "Invalid link_config 0x%x"
7771 " speed_cap_mask 0x%x\n",
7772 bp->port.link_config,
7773 bp->link_params.speed_cap_mask);
7778 case PORT_FEATURE_LINK_SPEED_1G:
7779 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7780 bp->link_params.req_line_speed = SPEED_1000;
7781 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7784 BNX2X_ERROR("NVRAM config error. "
7785 "Invalid link_config 0x%x"
7786 " speed_cap_mask 0x%x\n",
7787 bp->port.link_config,
7788 bp->link_params.speed_cap_mask);
7793 case PORT_FEATURE_LINK_SPEED_2_5G:
7794 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7795 bp->link_params.req_line_speed = SPEED_2500;
7796 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7799 BNX2X_ERROR("NVRAM config error. "
7800 "Invalid link_config 0x%x"
7801 " speed_cap_mask 0x%x\n",
7802 bp->port.link_config,
7803 bp->link_params.speed_cap_mask);
/* All 10G media variants map to the same requested speed. */
7808 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7809 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7810 case PORT_FEATURE_LINK_SPEED_10G_KR:
7811 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7812 bp->link_params.req_line_speed = SPEED_10000;
7813 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7816 BNX2X_ERROR("NVRAM config error. "
7817 "Invalid link_config 0x%x"
7818 " speed_cap_mask 0x%x\n",
7819 bp->port.link_config,
7820 bp->link_params.speed_cap_mask);
7826 BNX2X_ERROR("NVRAM config error. "
7827 "BAD link speed link_config 0x%x\n",
7828 bp->port.link_config);
7829 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7830 bp->port.advertising = bp->port.supported;
7834 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7835 PORT_FEATURE_FLOW_CONTROL_MASK);
7836 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7837 !(bp->port.supported & SUPPORTED_Autoneg))
7838 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7840 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7841 " advertising 0x%x\n",
7842 bp->link_params.req_line_speed,
7843 bp->link_params.req_duplex,
7844 bp->link_params.req_flow_ctrl, bp->port.advertising);
/*
 * Compose a 6-byte MAC address buffer from the two shmem words: the
 * upper 16 bits (mac_hi) followed by the lower 32 bits (mac_lo), both
 * converted to big-endian so the bytes land in network order.
 */
7847 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
7849 mac_hi = cpu_to_be16(mac_hi);
7850 mac_lo = cpu_to_be32(mac_lo);
7851 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
7852 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
/*
 * Probe-time discovery of per-port hardware info from shmem: lane config,
 * external PHY config (with BCM8727_NOC normalized to BCM8727 plus a
 * feature flag), speed capability mask, link config, the four lanes'
 * XGXS RX/TX equalization settings, WoL default state, the MDIO PRTAD
 * for the ethtool/mdio interface, and the port and iSCSI MAC addresses.
 * Calls bnx2x_link_settings_supported()/_requested() to finish link setup.
 * NOTE(review): some interior lines are elided from this listing.
 */
7855 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7857 int port = BP_PORT(bp);
7863 bp->link_params.bp = bp;
7864 bp->link_params.port = port;
7866 bp->link_params.lane_config =
7867 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7868 bp->link_params.ext_phy_config =
7870 dev_info.port_hw_config[port].external_phy_config);
7871 /* BCM8727_NOC => BCM8727 no over current */
7872 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
7873 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
7874 bp->link_params.ext_phy_config &=
7875 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
7876 bp->link_params.ext_phy_config |=
7877 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
7878 bp->link_params.feature_config_flags |=
7879 FEATURE_CONFIG_BCM8727_NOC;
7882 bp->link_params.speed_cap_mask =
7884 dev_info.port_hw_config[port].speed_capability_mask);
7886 bp->port.link_config =
7887 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7889 /* Get the 4 lanes xgxs config rx and tx */
/* Each 32-bit shmem word packs two 16-bit lane settings (hi/lo). */
7890 for (i = 0; i < 2; i++) {
7892 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
7893 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
7894 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
7897 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
7898 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
7899 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
7902 /* If the device is capable of WoL, set the default state according
7905 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
7906 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
7907 (config & PORT_FEATURE_WOL_ENABLED));
7909 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
7910 " speed_cap_mask 0x%08x link_config 0x%08x\n",
7911 bp->link_params.lane_config,
7912 bp->link_params.ext_phy_config,
7913 bp->link_params.speed_cap_mask, bp->port.link_config);
7915 bp->link_params.switch_cfg |= (bp->port.link_config &
7916 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7917 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7919 bnx2x_link_settings_requested(bp);
7922 * If connected directly, work with the internal PHY, otherwise, work
7923 * with the external PHY
7925 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7926 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
7927 bp->mdio.prtad = bp->link_params.phy_addr;
7929 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
7930 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
7932 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
/* Primary MAC: copied into netdev, link params and perm_addr. */
7934 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7935 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7936 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
7937 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7938 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7941 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
7942 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
7943 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
/*
 * Top-level probe-time hardware discovery: common (chip-wide) info, E1H
 * multi-function detection and E1HOV (outer VLAN) tag validation, per-port
 * info, firmware mailbox sequence sync, and the per-function MAC in MF
 * mode. Falls back to a random MAC (emulation/FPGA only) when no valid
 * one is available. Returns 0 on success, negative errno on a fatal
 * config error (paths elided from this listing).
 */
7947 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7949 int func = BP_FUNC(bp);
7953 bnx2x_get_common_hwinfo(bp);
7957 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
7959 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
/* A non-default E1HOV tag on function 0 signals multi-function mode. */
7961 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
7962 FUNC_MF_CFG_E1HOV_TAG_MASK);
7963 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
7965 BNX2X_DEV_INFO("%s function mode\n",
7966 IS_E1HMF(bp) ? "multi" : "single");
7969 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
7971 FUNC_MF_CFG_E1HOV_TAG_MASK);
7972 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7974 BNX2X_DEV_INFO("E1HOV for func %d is %d "
7976 func, bp->e1hov, bp->e1hov);
7978 BNX2X_ERROR("No valid E1HOV for func %d,"
7979 " aborting\n", func);
7984 BNX2X_ERROR("VN %d in single function mode,"
7985 " aborting\n", BP_E1HVN(bp));
7991 if (!BP_NOMCP(bp)) {
7992 bnx2x_get_port_hwinfo(bp);
/* Sync our mailbox sequence counter with the firmware's. */
7994 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7995 DRV_MSG_SEQ_NUMBER_MASK);
7996 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
/* In MF mode each function has its own MAC in the MF config block. */
8000 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8001 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8002 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8003 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8004 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8005 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8006 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8007 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8008 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8009 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8010 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8012 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8020 /* only supposed to happen on emulation/FPGA */
8021 BNX2X_ERROR("warning: random MAC workaround active\n");
8022 random_ether_addr(bp->dev->dev_addr);
8023 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
/*
 * Read the PCI VPD area looking for the read-only LRDT block; if the
 * manufacturer-ID keyword matches Dell, copy the vendor-specific (V0)
 * string into bp->fw_ver (terminated with a trailing space so the
 * bootcode version can be appended later by bnx2x_get_drvinfo).
 * bp->fw_ver is zeroed first, so every bail-out path leaves it empty.
 * NOTE(review): error-path gotos/labels are elided from this listing.
 */
8029 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8031 int cnt, i, block_end, rodi;
8032 char vpd_data[BNX2X_VPD_LEN+1];
8033 char str_id_reg[VENDOR_ID_LEN+1];
8034 char str_id_cap[VENDOR_ID_LEN+1];
8037 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8038 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
/* Short read: VPD unavailable or truncated - leave fw_ver empty. */
8040 if (cnt < BNX2X_VPD_LEN)
8043 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8044 PCI_VPD_LRDT_RO_DATA);
8049 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8050 pci_vpd_lrdt_size(&vpd_data[i]);
8052 i += PCI_VPD_LRDT_TAG_SIZE;
/* RO block claims to extend past the buffer we read - bail. */
8054 if (block_end > BNX2X_VPD_LEN)
8057 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8058 PCI_VPD_RO_KEYWORD_MFR_ID)
8062 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8064 if (len != VENDOR_ID_LEN)
8067 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8069 /* vendor specific info */
/* Match the Dell vendor id in either hex case. */
8070 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8071 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8072 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8073 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8075 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8076 PCI_VPD_RO_KEYWORD_VENDOR0);
8078 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8080 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8082 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8083 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8084 bp->fw_ver[len] = ' ';
/*
 * One-time driver-state initialization at probe: locks/mutexes, work
 * items, hardware discovery (bnx2x_get_hwinfo), VPD firmware string,
 * UNDI takeover cleanup, module-parameter-driven policy (multi-queue
 * mode, TPA/LRO, dropless flow control), default ring sizes, coalescing
 * ticks, and the periodic timer. Returns 0 or a negative errno from
 * hardware discovery.
 * NOTE(review): some interior lines (rc checks, #ifdefs, return) are
 * elided from this listing.
 */
8093 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8095 int func = BP_FUNC(bp);
8099 /* Disable interrupt handling until HW is initialized */
8100 atomic_set(&bp->intr_sem, 1);
8101 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8103 mutex_init(&bp->port.phy_mutex);
8104 mutex_init(&bp->fw_mb_mutex);
8106 mutex_init(&bp->cnic_mutex);
8109 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8110 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8112 rc = bnx2x_get_hwinfo(bp);
8114 bnx2x_read_fwinfo(bp);
8115 /* need to reset chip if undi was active */
8117 bnx2x_undi_unload(bp);
8119 if (CHIP_REV_IS_FPGA(bp))
8120 dev_err(&bp->pdev->dev, "FPGA detected\n");
8122 if (BP_NOMCP(bp) && (func == 0))
8123 dev_err(&bp->pdev->dev, "MCP disabled, "
8124 "must load devices in order!\n");
8126 /* Set multi queue mode */
/* RSS requires MSI-X; downgrade to single-queue for INTx/MSI. */
8127 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8128 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8129 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
8130 "requested is not MSI-X\n");
8131 multi_mode = ETH_RSS_MODE_DISABLED;
8133 bp->multi_mode = multi_mode;
8134 bp->int_mode = int_mode;
8136 bp->dev->features |= NETIF_F_GRO;
/* TPA (hardware LRO) on/off per the disable_tpa module parameter. */
8140 bp->flags &= ~TPA_ENABLE_FLAG;
8141 bp->dev->features &= ~NETIF_F_LRO;
8143 bp->flags |= TPA_ENABLE_FLAG;
8144 bp->dev->features |= NETIF_F_LRO;
8146 bp->disable_tpa = disable_tpa;
8149 bp->dropless_fc = 0;
8151 bp->dropless_fc = dropless_fc;
8155 bp->tx_ring_size = MAX_TX_AVAIL;
8156 bp->rx_ring_size = MAX_RX_AVAIL;
8160 /* make sure that the numbers are in the right granularity */
8161 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8162 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
/* Slow (emulation) chips get a 5x longer polling interval. */
8164 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8165 bp->current_interval = (poll ? poll : timer_interval);
8167 init_timer(&bp->timer);
8168 bp->timer.expires = jiffies + bp->current_interval;
8169 bp->timer.data = (unsigned long) bp;
8170 bp->timer.function = bnx2x_timer;
8176 * ethtool service functions
8179 /* All ethtool functions called with rtnl_lock */
/*
 * ethtool .get_settings: report supported/advertised masks, current
 * speed/duplex (capped by the MF max-bandwidth config when the link is
 * up), port type derived from the external PHY, PHY address, and
 * autoneg state. Called under rtnl_lock. Returns 0.
 * NOTE(review): interior lines (braces, else branches for the link-down
 * case) are elided from this listing.
 */
8181 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8183 struct bnx2x *bp = netdev_priv(dev);
8185 cmd->supported = bp->port.supported;
8186 cmd->advertising = bp->port.advertising;
8188 if ((bp->state == BNX2X_STATE_OPEN) &&
8189 !(bp->flags & MF_FUNC_DIS) &&
8190 (bp->link_vars.link_up)) {
8191 cmd->speed = bp->link_vars.line_speed;
8192 cmd->duplex = bp->link_vars.duplex;
/* In multi-function mode the reported speed is capped by the
 * per-function maximum bandwidth (in units of 100 Mb/s). */
8197 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8198 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8199 if (vn_max_rate < cmd->speed)
8200 cmd->speed = vn_max_rate;
8207 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8209 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8211 switch (ext_phy_type) {
8212 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8213 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8214 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8215 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8216 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8217 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8218 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8219 cmd->port = PORT_FIBRE;
8222 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8223 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8224 cmd->port = PORT_TP;
8227 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8228 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8229 bp->link_params.ext_phy_config);
8233 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8234 bp->link_params.ext_phy_config);
/* 1G SerDes path is always twisted pair. */
8238 cmd->port = PORT_TP;
8240 cmd->phy_address = bp->mdio.prtad;
8241 cmd->transceiver = XCVR_INTERNAL;
8243 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8244 cmd->autoneg = AUTONEG_ENABLE;
8246 cmd->autoneg = AUTONEG_DISABLE;
8251 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8252 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8253 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8254 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8255 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8256 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8257 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
/*
 * ethtool .set_settings: validate and apply a requested autoneg or
 * fixed speed/duplex. Autoneg requires SUPPORTED_Autoneg; each fixed
 * speed/duplex pair is rejected with -EINVAL (elided returns) when the
 * port does not support it. On success updates req_line_speed,
 * req_duplex and advertising, then restarts link config if the device
 * is running. Called under rtnl_lock.
 * NOTE(review): braces, break/return statements and the final
 * bnx2x_link_set() call are elided from this listing.
 */
8262 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8264 struct bnx2x *bp = netdev_priv(dev);
8270 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8271 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8272 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8273 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8274 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8275 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8276 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8278 if (cmd->autoneg == AUTONEG_ENABLE) {
8279 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8280 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8284 /* advertise the requested speed and duplex if supported */
8285 cmd->advertising &= bp->port.supported;
8287 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8288 bp->link_params.req_duplex = DUPLEX_FULL;
8289 bp->port.advertising |= (ADVERTISED_Autoneg |
8292 } else { /* forced speed */
8293 /* advertise the requested speed and duplex if supported */
8294 switch (cmd->speed) {
8296 if (cmd->duplex == DUPLEX_FULL) {
8297 if (!(bp->port.supported &
8298 SUPPORTED_10baseT_Full)) {
8300 "10M full not supported\n");
8304 advertising = (ADVERTISED_10baseT_Full |
8307 if (!(bp->port.supported &
8308 SUPPORTED_10baseT_Half)) {
8310 "10M half not supported\n");
8314 advertising = (ADVERTISED_10baseT_Half |
8320 if (cmd->duplex == DUPLEX_FULL) {
8321 if (!(bp->port.supported &
8322 SUPPORTED_100baseT_Full)) {
8324 "100M full not supported\n");
8328 advertising = (ADVERTISED_100baseT_Full |
8331 if (!(bp->port.supported &
8332 SUPPORTED_100baseT_Half)) {
8334 "100M half not supported\n");
8338 advertising = (ADVERTISED_100baseT_Half |
/* 1G and above support full duplex only. */
8344 if (cmd->duplex != DUPLEX_FULL) {
8345 DP(NETIF_MSG_LINK, "1G half not supported\n");
8349 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8350 DP(NETIF_MSG_LINK, "1G full not supported\n");
8354 advertising = (ADVERTISED_1000baseT_Full |
8359 if (cmd->duplex != DUPLEX_FULL) {
8361 "2.5G half not supported\n");
8365 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8367 "2.5G full not supported\n");
8371 advertising = (ADVERTISED_2500baseX_Full |
8376 if (cmd->duplex != DUPLEX_FULL) {
8377 DP(NETIF_MSG_LINK, "10G half not supported\n");
8381 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8382 DP(NETIF_MSG_LINK, "10G full not supported\n");
8386 advertising = (ADVERTISED_10000baseT_Full |
8391 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8395 bp->link_params.req_line_speed = cmd->speed;
8396 bp->link_params.req_duplex = cmd->duplex;
8397 bp->port.advertising = advertising;
8400 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8401 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8402 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8403 bp->port.advertising);
8405 if (netif_running(dev)) {
8406 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
/* True when a register-dump table entry is valid ("online") for the
 * E1 / E1H chip families respectively; used by the ethtool regs code. */
8413 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8414 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
/*
 * ethtool .get_regs_len: size in bytes of the register dump for this
 * chip family - the dump header plus every online single register plus,
 * for "wide" registers, size * (1 + read_regs_count) entries.
 * NOTE(review): the multiply by 4 (bytes per 32-bit word) and return
 * are elided from this listing.
 */
8416 static int bnx2x_get_regs_len(struct net_device *dev)
8418 struct bnx2x *bp = netdev_priv(dev);
8419 int regdump_len = 0;
8422 if (CHIP_IS_E1(bp)) {
8423 for (i = 0; i < REGS_COUNT; i++)
8424 if (IS_E1_ONLINE(reg_addrs[i].info))
8425 regdump_len += reg_addrs[i].size;
8427 for (i = 0; i < WREGS_COUNT_E1; i++)
8428 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8429 regdump_len += wreg_addrs_e1[i].size *
8430 (1 + wreg_addrs_e1[i].read_regs_count);
8433 for (i = 0; i < REGS_COUNT; i++)
8434 if (IS_E1H_ONLINE(reg_addrs[i].info))
8435 regdump_len += reg_addrs[i].size;
8437 for (i = 0; i < WREGS_COUNT_E1H; i++)
8438 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8439 regdump_len += wreg_addrs_e1h[i].size *
8440 (1 + wreg_addrs_e1h[i].read_regs_count);
8443 regdump_len += sizeof(struct dump_hdr);
/*
 * ethtool .get_regs: fill the caller's buffer with a dump header (storm
 * wait pointers, sign, online-info flag) followed by the raw contents of
 * every register that is online for this chip family. Buffer is zeroed
 * first; nothing is dumped unless the interface is running.
 * NOTE(review): the u32* cast of _p and the REG_RD target expression are
 * elided from this listing.
 */
8448 static void bnx2x_get_regs(struct net_device *dev,
8449 struct ethtool_regs *regs, void *_p)
8452 struct bnx2x *bp = netdev_priv(dev);
8453 struct dump_hdr dump_hdr = {0};
8456 memset(p, 0, regs->len);
8458 if (!netif_running(bp->dev))
8461 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8462 dump_hdr.dump_sign = dump_sign_all;
8463 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8464 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8465 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8466 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8467 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8469 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8470 p += dump_hdr.hdr_size + 1;
8472 if (CHIP_IS_E1(bp)) {
8473 for (i = 0; i < REGS_COUNT; i++)
8474 if (IS_E1_ONLINE(reg_addrs[i].info))
8475 for (j = 0; j < reg_addrs[i].size; j++)
8477 reg_addrs[i].addr + j*4);
8480 for (i = 0; i < REGS_COUNT; i++)
8481 if (IS_E1H_ONLINE(reg_addrs[i].info))
8482 for (j = 0; j < reg_addrs[i].size; j++)
8484 reg_addrs[i].addr + j*4);
/* Maximum length (incl. NUL) of the external PHY firmware version string. */
8488 #define PHY_FW_VER_LEN 10
/*
 * ethtool .get_drvinfo: driver name/version, a composite firmware
 * version string (VPD-derived bp->fw_ver + bootcode x.y.z + optional
 * PHY fw version), PCI bus info, and the stats/test/eeprom/regdump
 * lengths. PHY version is read under the PHY lock.
 */
8490 static void bnx2x_get_drvinfo(struct net_device *dev,
8491 struct ethtool_drvinfo *info)
8493 struct bnx2x *bp = netdev_priv(dev);
8494 u8 phy_fw_ver[PHY_FW_VER_LEN];
8496 strcpy(info->driver, DRV_MODULE_NAME);
8497 strcpy(info->version, DRV_MODULE_VERSION);
8499 phy_fw_ver[0] = '\0';
8501 bnx2x_acquire_phy_lock(bp);
8502 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8503 (bp->state != BNX2X_STATE_CLOSED),
8504 phy_fw_ver, PHY_FW_VER_LEN);
8505 bnx2x_release_phy_lock(bp);
/* Append bootcode (and PHY) version after the VPD-derived prefix. */
8508 strncpy(info->fw_version, bp->fw_ver, 32);
8509 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
8511 (bp->common.bc_ver & 0xff0000) >> 16,
8512 (bp->common.bc_ver & 0xff00) >> 8,
8513 (bp->common.bc_ver & 0xff),
8514 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
8515 strcpy(info->bus_info, pci_name(bp->pdev));
8516 info->n_stats = BNX2X_NUM_STATS;
8517 info->testinfo_len = BNX2X_NUM_TESTS;
8518 info->eedump_len = bp->common.flash_size;
8519 info->regdump_len = bnx2x_get_regs_len(dev);
/*
 * bnx2x_get_wol - ethtool .get_wol handler.
 * Reports WAKE_MAGIC support/state unless the NO_WOL_FLAG is set.
 */
8522 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8524 struct bnx2x *bp = netdev_priv(dev);
8526 if (bp->flags & NO_WOL_FLAG) {
8530 wol->supported = WAKE_MAGIC;
8532 wol->wolopts = WAKE_MAGIC;
/* No SecureOn password support */
8536 memset(&wol->sopass, 0, sizeof(wol->sopass));
/*
 * bnx2x_set_wol - ethtool .set_wol handler.
 * Accepts only WAKE_MAGIC; rejects it too when the HW cannot wake
 * (NO_WOL_FLAG).  Error-return lines are elided in this excerpt.
 */
8539 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8541 struct bnx2x *bp = netdev_priv(dev);
/* Anything other than magic-packet wake is unsupported */
8543 if (wol->wolopts & ~WAKE_MAGIC)
8546 if (wol->wolopts & WAKE_MAGIC) {
8547 if (bp->flags & NO_WOL_FLAG)
/* ethtool .get_msglevel: return the driver's debug message mask */
8557 static u32 bnx2x_get_msglevel(struct net_device *dev)
8559 struct bnx2x *bp = netdev_priv(dev);
8561 return bp->msg_enable;
/* ethtool .set_msglevel: privileged (CAP_NET_ADMIN) update of the mask */
8564 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8566 struct bnx2x *bp = netdev_priv(dev);
8568 if (capable(CAP_NET_ADMIN))
8569 bp->msg_enable = level;
/*
 * ethtool .nway_reset: restart autonegotiation by stopping stats and
 * re-initializing the link (re-init lines elided in this excerpt).
 */
8572 static int bnx2x_nway_reset(struct net_device *dev)
8574 struct bnx2x *bp = netdev_priv(dev);
8579 if (netif_running(dev)) {
8580 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
/*
 * ethtool .get_link: report link state; forced down when the
 * function is disabled in multi-function mode (MF_FUNC_DIS).
 */
8587 static u32 bnx2x_get_link(struct net_device *dev)
8589 struct bnx2x *bp = netdev_priv(dev);
8591 if (bp->flags & MF_FUNC_DIS)
8594 return bp->link_vars.link_up;
/* ethtool .get_eeprom_len: NVRAM flash size in bytes */
8597 static int bnx2x_get_eeprom_len(struct net_device *dev)
8599 struct bnx2x *bp = netdev_priv(dev);
8601 return bp->common.flash_size;
/*
 * bnx2x_acquire_nvram_lock - grab the per-port NVRAM SW arbitration.
 *
 * Requests the arb bit for this port and polls until granted or the
 * (emulation/FPGA-adjusted) timeout expires.  Error return on timeout;
 * the delay and return lines are elided in this excerpt.
 */
8604 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8606 int port = BP_PORT(bp);
8610 /* adjust timeout for emulation/FPGA */
8611 count = NVRAM_TIMEOUT_COUNT;
8612 if (CHIP_REV_IS_SLOW(bp))
8615 /* request access to nvram interface */
8616 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8617 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8619 for (i = 0; i < count*10; i++) {
8620 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8621 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
/* Timed out without the arb bit being granted */
8627 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8628 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
/*
 * bnx2x_release_nvram_lock - release the per-port NVRAM SW arbitration.
 * Mirror of acquire: clears the request bit and polls until the arb
 * bit actually drops.
 */
8635 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8637 int port = BP_PORT(bp);
8641 /* adjust timeout for emulation/FPGA */
8642 count = NVRAM_TIMEOUT_COUNT;
8643 if (CHIP_REV_IS_SLOW(bp))
8646 /* relinquish nvram interface */
8647 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8648 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8650 for (i = 0; i < count*10; i++) {
8651 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8652 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
/* Arb bit never dropped -- report and fail */
8658 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8659 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
/* Enable NVRAM access: set both enable bits (read path needs them too) */
8666 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8670 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8672 /* enable both bits, even on read */
8673 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8674 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8675 MCPR_NVM_ACCESS_ENABLE_WR_EN));
/* Disable NVRAM access: clear both enable bits */
8678 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8682 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8684 /* disable both bits, even after read */
8685 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8686 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8687 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
/*
 * bnx2x_nvram_read_dword - read one 32-bit word from NVRAM.
 *
 * @offset:    byte offset within flash (masked to the valid address bits)
 * @ret_val:   output, stored big-endian so the caller can treat the
 *             buffer as a byte array (ethtool view)
 * @cmd_flags: MCPR_NVM_COMMAND_FIRST/LAST sequencing flags from caller
 *
 * Issues the read command and polls for DONE with a timeout scaled up
 * on emulation/FPGA.  Timeout/return lines are elided in this excerpt.
 */
8690 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8696 /* build the command word */
8697 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8699 /* need to clear DONE bit separately */
8700 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8702 /* address of the NVRAM to read from */
8703 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8704 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8706 /* issue a read command */
8707 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8709 /* adjust timeout for emulation/FPGA */
8710 count = NVRAM_TIMEOUT_COUNT;
8711 if (CHIP_REV_IS_SLOW(bp))
8714 /* wait for completion */
8717 for (i = 0; i < count; i++) {
8719 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8721 if (val & MCPR_NVM_COMMAND_DONE) {
8722 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8723 /* we read nvram data in cpu order
8724 * but ethtool sees it as an array of bytes
8725 * converting to big-endian will do the work */
8726 *ret_val = cpu_to_be32(val);
/*
 * bnx2x_nvram_read - read an arbitrary dword-aligned range from NVRAM.
 *
 * Validates alignment and bounds, takes the NVRAM lock, and reads the
 * range one dword at a time using FIRST/LAST command sequencing.
 * The final dword (with MCPR_NVM_COMMAND_LAST) is read after the loop.
 */
8735 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
/* Both offset and size must be dword aligned and non-zero */
8742 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8744 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
/* Range must fit inside the flash */
8749 if (offset + buf_size > bp->common.flash_size) {
8750 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8751 " buf_size (0x%x) > flash_size (0x%x)\n",
8752 offset, buf_size, bp->common.flash_size);
8756 /* request access to nvram interface */
8757 rc = bnx2x_acquire_nvram_lock(bp);
8761 /* enable access to nvram interface */
8762 bnx2x_enable_nvram_access(bp);
8764 /* read the first word(s) */
8765 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8766 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8767 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
/* val is big-endian; copy raw bytes to the caller's buffer */
8768 memcpy(ret_buf, &val, 4);
8770 /* advance to the next dword */
8771 offset += sizeof(u32);
8772 ret_buf += sizeof(u32);
8773 buf_size -= sizeof(u32);
/* Last dword carries the LAST flag to close the command sequence */
8778 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8779 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8780 memcpy(ret_buf, &val, 4);
8783 /* disable access to nvram interface */
8784 bnx2x_disable_nvram_access(bp);
8785 bnx2x_release_nvram_lock(bp);
/*
 * bnx2x_get_eeprom - ethtool .get_eeprom handler.
 * Thin wrapper over bnx2x_nvram_read; requires the interface running.
 */
8790 static int bnx2x_get_eeprom(struct net_device *dev,
8791 struct ethtool_eeprom *eeprom, u8 *eebuf)
8793 struct bnx2x *bp = netdev_priv(dev);
8796 if (!netif_running(dev))
8799 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8800 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8801 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8802 eeprom->len, eeprom->len);
8804 /* parameters already validated in ethtool_get_eeprom */
8806 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
/*
 * bnx2x_nvram_write_dword - write one 32-bit word to NVRAM.
 *
 * Mirror of bnx2x_nvram_read_dword: clears DONE, programs data and
 * address, issues the write command and polls for completion with an
 * emulation/FPGA-scaled timeout.  Caller supplies FIRST/LAST flags.
 */
8811 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8816 /* build the command word */
8817 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8819 /* need to clear DONE bit separately */
8820 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8822 /* write the data */
8823 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8825 /* address of the NVRAM to write to */
8826 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8827 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8829 /* issue the write command */
8830 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8832 /* adjust timeout for emulation/FPGA */
8833 count = NVRAM_TIMEOUT_COUNT;
8834 if (CHIP_REV_IS_SLOW(bp))
8837 /* wait for completion */
8839 for (i = 0; i < count; i++) {
8841 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8842 if (val & MCPR_NVM_COMMAND_DONE) {
/* Bit offset of a byte within its containing dword */
8851 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
/*
 * bnx2x_nvram_write1 - write a single byte to NVRAM.
 *
 * Implements byte granularity on top of the dword-only hardware
 * interface: read-modify-write of the containing aligned dword.
 * NOTE(review): the dword is read in big-endian form by
 * bnx2x_nvram_read_dword and converted with be32_to_cpu() before the
 * write, while the byte is patched in between -- the byte-lane math
 * here depends on that ordering; verify before touching.
 */
8853 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8861 if (offset + buf_size > bp->common.flash_size) {
8862 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8863 " buf_size (0x%x) > flash_size (0x%x)\n",
8864 offset, buf_size, bp->common.flash_size);
8868 /* request access to nvram interface */
8869 rc = bnx2x_acquire_nvram_lock(bp);
8873 /* enable access to nvram interface */
8874 bnx2x_enable_nvram_access(bp);
/* Single-dword transaction: FIRST and LAST in one command */
8876 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8877 align_offset = (offset & ~0x03);
8878 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
/* Replace only the target byte lane */
8881 val &= ~(0xff << BYTE_OFFSET(offset));
8882 val |= (*data_buf << BYTE_OFFSET(offset));
8884 /* nvram data is returned as an array of bytes
8885 * convert it back to cpu order */
8886 val = be32_to_cpu(val);
8888 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8892 /* disable access to nvram interface */
8893 bnx2x_disable_nvram_access(bp);
8894 bnx2x_release_nvram_lock(bp);
/*
 * bnx2x_nvram_write - write a dword-aligned buffer to NVRAM.
 *
 * A one-byte request (ethtool convention) is delegated to
 * bnx2x_nvram_write1; otherwise alignment/bounds are validated and the
 * buffer is written dword by dword, restarting the FIRST/LAST command
 * sequencing at NVRAM page boundaries.
 */
8899 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8907 if (buf_size == 1) /* ethtool */
8908 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8910 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8912 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8917 if (offset + buf_size > bp->common.flash_size) {
8918 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8919 " buf_size (0x%x) > flash_size (0x%x)\n",
8920 offset, buf_size, bp->common.flash_size);
8924 /* request access to nvram interface */
8925 rc = bnx2x_acquire_nvram_lock(bp);
8929 /* enable access to nvram interface */
8930 bnx2x_enable_nvram_access(bp);
8933 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8934 while ((written_so_far < buf_size) && (rc == 0)) {
/* LAST on the final dword of the buffer or of an NVRAM page;
 * FIRST again at the start of each new page */
8935 if (written_so_far == (buf_size - sizeof(u32)))
8936 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8937 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8938 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8939 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8940 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8942 memcpy(&val, data_buf, 4);
8944 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8946 /* advance to the next dword */
8947 offset += sizeof(u32);
8948 data_buf += sizeof(u32);
8949 written_so_far += sizeof(u32);
8953 /* disable access to nvram interface */
8954 bnx2x_disable_nvram_access(bp);
8955 bnx2x_release_nvram_lock(bp);
/*
 * bnx2x_set_eeprom - ethtool .set_eeprom handler.
 *
 * Besides plain NVRAM writes, special eeprom->magic values drive the
 * external-PHY firmware upgrade flow ('PHYP' prepare, 'PHYR' re-init,
 * and an upgrade-complete step).  PHY eeprom access requires PMF.
 */
8960 static int bnx2x_set_eeprom(struct net_device *dev,
8961 struct ethtool_eeprom *eeprom, u8 *eebuf)
8963 struct bnx2x *bp = netdev_priv(dev);
8964 int port = BP_PORT(bp);
8967 if (!netif_running(dev))
8970 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8971 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8972 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8973 eeprom->len, eeprom->len);
8975 /* parameters already validated in ethtool_set_eeprom */
8977 /* PHY eeprom can be accessed only by the PMF */
8978 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
8982 if (eeprom->magic == 0x50485950) {
8983 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
8984 bnx2x_stats_handle(bp, STATS_EVENT_STOP)
8986 bnx2x_acquire_phy_lock(bp);
8987 rc |= bnx2x_link_reset(&bp->link_params,
/* SFX7101 needs GPIO0 high to enter download mode */
8989 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8990 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
8991 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
8992 MISC_REGISTERS_GPIO_HIGH, port);
8993 bnx2x_release_phy_lock(bp);
8994 bnx2x_link_report(bp);
8996 } else if (eeprom->magic == 0x50485952) {
8997 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
8998 if (bp->state == BNX2X_STATE_OPEN) {
8999 bnx2x_acquire_phy_lock(bp);
9000 rc |= bnx2x_link_reset(&bp->link_params,
9003 rc |= bnx2x_phy_init(&bp->link_params,
9005 bnx2x_release_phy_lock(bp);
9006 bnx2x_calc_fc_adv(bp);
9008 } else if (eeprom->magic == 0x53985943) {
/*
 * NOTE(review): comment says 'PHYC' but ASCII "PHYC" is 0x50485943,
 * not 0x53985943, and 0x53985943 is outside the PMF-gated
 * 0x504859xx range checked above -- confirm against the PHY FW
 * upgrade tool before changing either value.
 */
9009 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9010 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9011 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9013 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9015 /* DSP Remove Download Mode */
9016 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9017 MISC_REGISTERS_GPIO_LOW, port);
9019 bnx2x_acquire_phy_lock(bp);
9021 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9023 /* wait 0.5 sec to allow it to run */
9025 bnx2x_ext_phy_hw_reset(bp, port);
9027 bnx2x_release_phy_lock(bp);
/* Default: ordinary NVRAM write */
9030 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
/* ethtool .get_coalesce: report RX/TX interrupt coalescing in usecs */
9035 static int bnx2x_get_coalesce(struct net_device *dev,
9036 struct ethtool_coalesce *coal)
9038 struct bnx2x *bp = netdev_priv(dev);
9040 memset(coal, 0, sizeof(struct ethtool_coalesce));
9042 coal->rx_coalesce_usecs = bp->rx_ticks;
9043 coal->tx_coalesce_usecs = bp->tx_ticks;
/*
 * ethtool .set_coalesce: clamp requested RX/TX tick values to
 * BNX2X_MAX_COALESCE_TOUT and push them to HW if the NIC is up.
 */
9048 static int bnx2x_set_coalesce(struct net_device *dev,
9049 struct ethtool_coalesce *coal)
9051 struct bnx2x *bp = netdev_priv(dev);
9053 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
9054 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9055 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9057 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
9058 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9059 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
9061 if (netif_running(dev))
9062 bnx2x_update_coalesce(bp);
/* ethtool .get_ringparam: current and max RX/TX ring sizes
 * (mini/jumbo rings are not supported, hence zeroed) */
9067 static void bnx2x_get_ringparam(struct net_device *dev,
9068 struct ethtool_ringparam *ering)
9070 struct bnx2x *bp = netdev_priv(dev);
9072 ering->rx_max_pending = MAX_RX_AVAIL;
9073 ering->rx_mini_max_pending = 0;
9074 ering->rx_jumbo_max_pending = 0;
9076 ering->rx_pending = bp->rx_ring_size;
9077 ering->rx_mini_pending = 0;
9078 ering->rx_jumbo_pending = 0;
9080 ering->tx_max_pending = MAX_TX_AVAIL;
9081 ering->tx_pending = bp->tx_ring_size;
/*
 * ethtool .set_ringparam: validate bounds (TX must exceed
 * MAX_SKB_FRAGS + 4 so a max-fragmented skb always fits), then
 * reload the NIC to apply the new ring sizes.  Refused while parity
 * error recovery is in progress.
 */
9084 static int bnx2x_set_ringparam(struct net_device *dev,
9085 struct ethtool_ringparam *ering)
9087 struct bnx2x *bp = netdev_priv(dev);
9090 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9091 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
9095 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9096 (ering->tx_pending > MAX_TX_AVAIL) ||
9097 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9100 bp->rx_ring_size = ering->rx_pending;
9101 bp->tx_ring_size = ering->tx_pending;
/* Ring sizes take effect only across an unload/load cycle */
9103 if (netif_running(dev)) {
9104 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9105 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
/*
 * ethtool .get_pauseparam: autoneg is reported only when both flow
 * control and line speed are set to autonegotiate; rx/tx pause reflect
 * the currently resolved link flow-control bits.
 */
9111 static void bnx2x_get_pauseparam(struct net_device *dev,
9112 struct ethtool_pauseparam *epause)
9114 struct bnx2x *bp = netdev_priv(dev);
9116 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9117 BNX2X_FLOW_CTRL_AUTO) &&
9118 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9120 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9121 BNX2X_FLOW_CTRL_RX);
9122 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9123 BNX2X_FLOW_CTRL_TX);
9125 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9126 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9127 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
/*
 * ethtool .set_pauseparam: translate rx/tx/autoneg requests into
 * req_flow_ctrl, then re-init the link if the NIC is running.
 */
9130 static int bnx2x_set_pauseparam(struct net_device *dev,
9131 struct ethtool_pauseparam *epause)
9133 struct bnx2x *bp = netdev_priv(dev);
9138 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9139 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9140 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
/* Start from AUTO (== RX|TX mask) and strip the unrequested bits */
9142 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9144 if (epause->rx_pause)
9145 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9147 if (epause->tx_pause)
9148 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9150 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9151 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9153 if (epause->autoneg) {
9154 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9155 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9159 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9160 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9164 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9166 if (netif_running(dev)) {
9167 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
/*
 * ethtool .set_flags: handle ETH_FLAG_LRO (mapped onto the TPA HW
 * feature) and ETH_FLAG_RXHASH.  TPA additionally requires RX checksum
 * offload to be enabled.  A NIC reload is triggered when a flag that
 * needs it actually changed.  Refused during parity error recovery.
 */
9174 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9176 struct bnx2x *bp = netdev_priv(dev);
9180 if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
9183 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9184 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
9188 /* TPA requires Rx CSUM offloading */
9189 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9190 if (!bp->disable_tpa) {
9191 if (!(dev->features & NETIF_F_LRO)) {
9192 dev->features |= NETIF_F_LRO;
9193 bp->flags |= TPA_ENABLE_FLAG;
9198 } else if (dev->features & NETIF_F_LRO) {
9199 dev->features &= ~NETIF_F_LRO;
9200 bp->flags &= ~TPA_ENABLE_FLAG;
9204 if (data & ETH_FLAG_RXHASH)
9205 dev->features |= NETIF_F_RXHASH;
9207 dev->features &= ~NETIF_F_RXHASH;
9209 if (changed && netif_running(dev)) {
9210 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9211 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
/* ethtool .get_rx_csum (return line elided in this excerpt) */
9217 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9219 struct bnx2x *bp = netdev_priv(dev);
/*
 * ethtool .set_rx_csum: when RX csum is turned off, TPA/LRO must be
 * dropped too, since TPA-aggregated packets would carry bogus TCP
 * checksums.  Refused during parity error recovery.
 */
9224 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9226 struct bnx2x *bp = netdev_priv(dev);
9229 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9230 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
9236 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9237 TPA'ed packets will be discarded due to wrong TCP CSUM */
9239 u32 flags = ethtool_op_get_flags(dev);
9241 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
/* ethtool .set_tso: toggle TSO/TSO-ECN/TSO6 feature bits together */
9247 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9250 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9251 dev->features |= NETIF_F_TSO6;
9253 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9254 dev->features &= ~NETIF_F_TSO6;
/* Self-test names reported to ethtool; order must match the buf[]
 * indices filled in by bnx2x_self_test() */
9260 static const struct {
9261 char string[ETH_GSTRING_LEN];
9262 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9263 { "register_test (offline)" },
9264 { "memory_test (offline)" },
9265 { "loopback_test (offline)" },
9266 { "nvram_test (online)" },
9267 { "interrupt_test (online)" },
9268 { "link_test (online)" },
9269 { "idle check (online)" }
/*
 * bnx2x_test_registers - offline self-test: register write/read-back.
 *
 * For each entry of reg_tbl (offset0 = base register, offset1 = per-port
 * stride, mask = writable bits) the test saves the register, writes a
 * pattern, reads it back, restores the original value, and verifies the
 * masked bits round-tripped.  Two passes: pattern 0x00000000 then
 * 0xffffffff.  Returns 0 on success, -ENODEV on any mismatch or when
 * the interface is down.
 */
9272 static int bnx2x_test_registers(struct bnx2x *bp)
9274 int idx, i, rc = -ENODEV;
9276 int port = BP_PORT(bp);
9277 static const struct {
9282 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9283 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9284 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9285 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9286 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9287 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9288 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9289 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9290 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9291 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9292 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9293 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9294 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9295 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9296 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9297 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9298 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9299 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9300 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9301 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9302 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9303 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9304 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9305 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9306 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9307 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9308 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9309 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9310 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9311 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9312 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9313 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9314 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9315 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9316 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9317 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9318 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
/* Sentinel terminating the table */
9320 { 0xffffffff, 0, 0x00000000 }
9323 if (!netif_running(bp->dev))
9326 /* Repeat the test twice:
9327 First by writing 0x00000000, second by writing 0xffffffff */
9328 for (idx = 0; idx < 2; idx++) {
9335 wr_val = 0xffffffff;
9339 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9340 u32 offset, mask, save_val, val;
/* offset1 is the per-port register stride in bytes */
9342 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9343 mask = reg_tbl[i].mask;
9345 save_val = REG_RD(bp, offset);
9347 REG_WR(bp, offset, (wr_val & mask));
9348 val = REG_RD(bp, offset);
9350 /* Restore the original register's value */
9351 REG_WR(bp, offset, save_val);
9353 /* verify value is as expected */
9354 if ((val & mask) != (wr_val & mask)) {
9356 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
9357 offset, val, wr_val, mask);
/*
 * bnx2x_test_memory - offline self-test: internal memory read + parity.
 *
 * Reads every dword of each internal memory in mem_tbl (reads alone
 * will trigger parity events on bad cells), then checks the parity
 * status registers against the per-chip-revision ignore masks.
 * Returns 0 on success, -ENODEV on unexpected parity or if down.
 */
9371 static int bnx2x_test_memory(struct bnx2x *bp)
9373 int i, j, rc = -ENODEV;
9373 static const struct {
9377 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9378 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9379 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9380 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9381 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9382 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9383 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
/* Parity status registers with bits to ignore per chip rev
 * (fields: name, offset, e1_mask, e1h_mask) */
9387 static const struct {
9393 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9394 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9395 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9396 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9397 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9398 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9400 { NULL, 0xffffffff, 0, 0 }
9403 if (!netif_running(bp->dev))
9406 /* Go through all the memories */
9407 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9408 for (j = 0; j < mem_tbl[i].size; j++)
9409 REG_RD(bp, mem_tbl[i].offset + j*4);
9411 /* Check the parity status */
9412 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9413 val = REG_RD(bp, prty_tbl[i].offset);
9414 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9415 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9417 "%s is 0x%x\n", prty_tbl[i].name, val);
/* Poll bnx2x_link_test() until it reports the expected state or a
 * bounded retry count (cnt) runs out; sleep between polls is elided
 * in this excerpt. */
9428 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9433 while (bnx2x_link_test(bp) && cnt--)
/*
 * bnx2x_run_loopback - send one packet through a HW loopback and
 * verify it comes back intact on queue 0.
 *
 * @loopback_mode: BNX2X_PHY_LOOPBACK or BNX2X_MAC_LOOPBACK
 * @link_up:       expected link state (used by callers' wait logic)
 *
 * Builds a frame (own MAC, zero dest, 0x77 filler header, byte-counter
 * payload), posts it as start BD + parse BD, rings the doorbell, then
 * checks TX/RX consumer advance, CQE flags, length, and payload bytes.
 * Returns 0 on success, a nonzero step code otherwise.
 */
9437 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9439 unsigned int pkt_size, num_pkts, i;
9440 struct sk_buff *skb;
9441 unsigned char *packet;
/* Loopback traffic is sent and received on fastpath queue 0 */
9442 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9443 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
9444 u16 tx_start_idx, tx_idx;
9445 u16 rx_start_idx, rx_idx;
9446 u16 pkt_prod, bd_prod;
9447 struct sw_tx_bd *tx_buf;
9448 struct eth_tx_start_bd *tx_start_bd;
9449 struct eth_tx_parse_bd *pbd = NULL;
9451 union eth_rx_cqe *cqe;
9453 struct sw_rx_bd *rx_buf;
9457 /* check the loopback mode */
9458 switch (loopback_mode) {
9459 case BNX2X_PHY_LOOPBACK:
/* PHY loopback is only valid when link code is in XGXS_10 mode */
9460 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9463 case BNX2X_MAC_LOOPBACK:
9464 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9465 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9471 /* prepare the loopback packet */
9472 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9473 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9474 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9477 goto test_loopback_exit;
9479 packet = skb_put(skb, pkt_size);
9480 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9481 memset(packet + ETH_ALEN, 0, ETH_ALEN);
9482 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
/* Payload = byte counter pattern, verified on receive below */
9483 for (i = ETH_HLEN; i < pkt_size; i++)
9484 packet[i] = (unsigned char) (i & 0xff);
9486 /* send the loopback packet */
9488 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9489 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
9491 pkt_prod = fp_tx->tx_pkt_prod++;
9492 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
9493 tx_buf->first_bd = fp_tx->tx_bd_prod;
9497 bd_prod = TX_BD(fp_tx->tx_bd_prod);
9498 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
9499 mapping = dma_map_single(&bp->pdev->dev, skb->data,
9500 skb_headlen(skb), DMA_TO_DEVICE);
9501 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9502 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9503 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
9504 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9505 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
9506 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9507 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
9508 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9510 /* turn on parsing and get a BD */
9511 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9512 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
9514 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
/* Two BDs consumed: start BD + parse BD; ring the doorbell */
9518 fp_tx->tx_db.data.prod += 2;
9520 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
9525 fp_tx->tx_bd_prod += 2; /* start + pbd */
/* Verify exactly num_pkts completed on TX ... */
9529 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9530 if (tx_idx != tx_start_idx + num_pkts)
9531 goto test_loopback_exit;
/* ... and arrived on RX */
9533 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
9534 if (rx_idx != rx_start_idx + num_pkts)
9535 goto test_loopback_exit;
9537 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
9538 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
/* Must be a fast-path CQE without error flags */
9539 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9540 goto test_loopback_rx_exit;
9542 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9543 if (len != pkt_size)
9544 goto test_loopback_rx_exit;
9546 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
9548 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
/* Verify the byte-counter payload survived the loopback */
9549 for (i = ETH_HLEN; i < pkt_size; i++)
9550 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9551 goto test_loopback_rx_exit;
9555 test_loopback_rx_exit:
/* Consume the RX BD/CQE used by the test and republish producers */
9557 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
9558 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
9559 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
9560 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
9562 /* Update producers */
9563 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
9564 fp_rx->rx_sge_prod);
/* Restore normal (non-loopback) link mode */
9567 bp->link_params.loopback_mode = LOOPBACK_NONE;
9567 bp->link_params.loopback_mode = LOOPBACK_NONE;
9572 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9579 if (!netif_running(bp->dev))
9580 return BNX2X_LOOPBACK_FAILED;
9582 bnx2x_netif_stop(bp, 1);
9583 bnx2x_acquire_phy_lock(bp);
9585 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9587 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9588 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9591 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9593 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9594 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9597 bnx2x_release_phy_lock(bp);
9598 bnx2x_netif_start(bp);
/* Expected CRC32 residual of a region whose trailing dword holds the
 * CRC of the preceding data */
9603 #define CRC32_RESIDUAL 0xdebb20e3
/*
 * bnx2x_test_nvram - online self-test: NVRAM integrity check.
 *
 * Verifies the NVRAM magic signature, then reads each directory region
 * in nvram_tbl and checks its little-endian CRC32 residual.  The
 * largest region (manuf_info, 0x350 bytes) bounds the stack buffer.
 */
9605 static int bnx2x_test_nvram(struct bnx2x *bp)
9607 static const struct {
9611 { 0, 0x14 }, /* bootstrap */
9612 { 0x14, 0xec }, /* dir */
9613 { 0x100, 0x350 }, /* manuf_info */
9614 { 0x450, 0xf0 }, /* feature_info */
9615 { 0x640, 0x64 }, /* upgrade_key_info */
9617 { 0x708, 0x70 }, /* manuf_key_info */
9621 __be32 buf[0x350 / 4];
9622 u8 *data = (u8 *)buf;
/* First check the 4-byte magic at offset 0 */
9629 rc = bnx2x_nvram_read(bp, 0, data, 4);
9631 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9632 goto test_nvram_exit;
9635 magic = be32_to_cpu(buf[0]);
9636 if (magic != 0x669955aa) {
9637 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9639 goto test_nvram_exit;
9642 for (i = 0; nvram_tbl[i].size; i++) {
9644 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9648 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9649 goto test_nvram_exit;
/* CRC over data including its stored CRC must yield the residual */
9652 crc = ether_crc_le(nvram_tbl[i].size, data);
9653 if (crc != CRC32_RESIDUAL) {
9655 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
9657 goto test_nvram_exit;
/*
 * bnx2x_test_intr - online self-test: slowpath interrupt path.
 *
 * Posts an empty SET_MAC ramrod (length 0, last unicast CAM entry) and
 * polls set_mac_pending up to ~100ms for the completion interrupt to
 * clear it.  Failure/return lines are elided in this excerpt.
 */
9665 static int bnx2x_test_intr(struct bnx2x *bp)
9667 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9670 if (!netif_running(bp->dev))
9673 config->hdr.length = 0;
9675 /* use last unicast entries */
9676 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
9678 config->hdr.offset = BP_FUNC(bp);
9679 config->hdr.client_id = bp->fp->cl_id;
9680 config->hdr.reserved1 = 0;
/* Completion handler decrements this when the ramrod interrupt fires */
9682 bp->set_mac_pending++;
9684 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9685 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9686 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9688 for (i = 0; i < 10; i++) {
9689 if (!bp->set_mac_pending)
9692 msleep_interruptible(10);
/*
 * bnx2x_self_test - ethtool .self_test handler.
 *
 * buf[] indices correspond to bnx2x_tests_str_arr: 0 registers,
 * 1 memory, 2 loopback (offline group); nvram, interrupt, link
 * (online group).  Offline tests reload the NIC in LOAD_DIAG mode
 * with the TX port input disabled, then restore normal operation.
 * Refused entirely during parity error recovery.
 */
9701 static void bnx2x_self_test(struct net_device *dev,
9702 struct ethtool_test *etest, u64 *buf)
9704 struct bnx2x *bp = netdev_priv(dev);
9706 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9707 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
9708 etest->flags |= ETH_TEST_FL_FAILED;
9712 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9714 if (!netif_running(dev))
9717 /* offline tests are not supported in MF mode */
9719 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9721 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9722 int port = BP_PORT(bp);
9726 /* save current value of input enable for TX port IF */
9727 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
9728 /* disable input for TX port IF */
9729 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
/* Remember link state before the diag reload so we can wait for it */
9731 link_up = (bnx2x_link_test(bp) == 0);
9732 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9733 bnx2x_nic_load(bp, LOAD_DIAG);
9734 /* wait until link state is restored */
9735 bnx2x_wait_for_link(bp, link_up);
9737 if (bnx2x_test_registers(bp) != 0) {
9739 etest->flags |= ETH_TEST_FL_FAILED;
9741 if (bnx2x_test_memory(bp) != 0) {
9743 etest->flags |= ETH_TEST_FL_FAILED;
/* buf[2] holds the loopback failure bitmask directly */
9745 buf[2] = bnx2x_test_loopback(bp, link_up);
9747 etest->flags |= ETH_TEST_FL_FAILED;
9749 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9751 /* restore input for TX port IF */
9752 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
9754 bnx2x_nic_load(bp, LOAD_NORMAL);
9755 /* wait until link state is restored */
9756 bnx2x_wait_for_link(bp, link_up);
9758 if (bnx2x_test_nvram(bp) != 0) {
9760 etest->flags |= ETH_TEST_FL_FAILED;
9762 if (bnx2x_test_intr(bp) != 0) {
9764 etest->flags |= ETH_TEST_FL_FAILED;
9767 if (bnx2x_link_test(bp) != 0) {
9769 etest->flags |= ETH_TEST_FL_FAILED;
9772 #ifdef BNX2X_EXTRA_DEBUG
9773 bnx2x_panic_dump(bp);
/* Per-queue statistics descriptors: offset into the queue stats
 * struct, field size in bytes (8 = hi/lo pair, 4 = single u32), and
 * the ethtool name template ("%d" = queue index) */
9777 static const struct {
9780 u8 string[ETH_GSTRING_LEN];
9781 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9782 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9783 { Q_STATS_OFFSET32(error_bytes_received_hi),
9784 8, "[%d]: rx_error_bytes" },
9785 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9786 8, "[%d]: rx_ucast_packets" },
9787 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9788 8, "[%d]: rx_mcast_packets" },
9789 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9790 8, "[%d]: rx_bcast_packets" },
9791 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9792 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9793 4, "[%d]: rx_phy_ip_err_discards"},
9794 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9795 4, "[%d]: rx_skb_alloc_discard" },
9796 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9798 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9799 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9800 8, "[%d]: tx_ucast_packets" },
9801 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
9802 8, "[%d]: tx_mcast_packets" },
9803 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
9804 8, "[%d]: tx_bcast_packets" }
/*
 * Device-level ethtool statistics descriptor table.
 * Each entry gives the dword offset (via STATS_OFFSET32) of the counter,
 * its width in bytes (8-byte counters are stored as hi/lo dword pairs,
 * hence the *_hi field names), a flags word telling whether the counter
 * is maintained per-port, per-function or both, and its ethtool name.
 * NOTE(review): some struct member declaration lines and the closing
 * brace of the initializer appear to be elided in this excerpt.
 */
9807 static const struct {
9811 #define STATS_FLAGS_PORT 1
9812 #define STATS_FLAGS_FUNC 2
9813 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9814 u8 string[ETH_GSTRING_LEN];
9815 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* RX counters */
9816 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9817 8, STATS_FLAGS_BOTH, "rx_bytes" },
9818 { STATS_OFFSET32(error_bytes_received_hi),
9819 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9820 { STATS_OFFSET32(total_unicast_packets_received_hi),
9821 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9822 { STATS_OFFSET32(total_multicast_packets_received_hi),
9823 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9824 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9825 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9826 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9827 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9828 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9829 8, STATS_FLAGS_PORT, "rx_align_errors" },
9830 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9831 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9832 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9833 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9834 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9835 8, STATS_FLAGS_PORT, "rx_fragments" },
9836 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9837 8, STATS_FLAGS_PORT, "rx_jabbers" },
9838 { STATS_OFFSET32(no_buff_discard_hi),
9839 8, STATS_FLAGS_BOTH, "rx_discards" },
9840 { STATS_OFFSET32(mac_filter_discard),
9841 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9842 { STATS_OFFSET32(xxoverflow_discard),
9843 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9844 { STATS_OFFSET32(brb_drop_hi),
9845 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9846 { STATS_OFFSET32(brb_truncate_hi),
9847 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9848 { STATS_OFFSET32(pause_frames_received_hi),
9849 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9850 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9851 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9852 { STATS_OFFSET32(nig_timer_max),
9853 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9854 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9855 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9856 { STATS_OFFSET32(rx_skb_alloc_failed),
9857 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9858 { STATS_OFFSET32(hw_csum_err),
9859 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
/* TX counters */
9861 { STATS_OFFSET32(total_bytes_transmitted_hi),
9862 8, STATS_FLAGS_BOTH, "tx_bytes" },
9863 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9864 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9865 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9866 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
9867 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
9868 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
9869 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
9870 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
9871 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9872 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9873 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9874 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9875 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9876 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9877 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9878 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9879 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9880 8, STATS_FLAGS_PORT, "tx_deferred" },
9881 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9882 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9883 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9884 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9885 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9886 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9887 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9888 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9889 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9890 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9891 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9892 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9893 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9894 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9895 /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9896 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9897 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9898 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9899 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9900 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9901 { STATS_OFFSET32(pause_frames_sent_hi),
9902 8, STATS_FLAGS_PORT, "tx_pause_frames" }
/* Classify bnx2x_stats_arr entries by their flags field. */
9905 #define IS_PORT_STAT(i) \
9906 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
/* True also for BOTH-flagged entries, since FUNC bit is set there too. */
9907 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
/* In E1H multi-function mode only function-level stats are exposed,
 * unless the user opted in via the BNX2X_MSG_STATS msg_enable bit. */
9908 #define IS_E1HMF_MODE_STAT(bp) \
9909 (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
/*
 * ethtool .get_sset_count callback: number of entries for the requested
 * string set (statistics or self-test names).
 * NOTE(review): the switch case labels, braces and some return paths are
 * elided in this excerpt; comments describe only the visible arithmetic.
 */
9911 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
9913 struct bnx2x *bp = netdev_priv(dev);
9916 switch (stringset) {
/* per-queue counters for every active queue ... */
9919 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
/* ... plus device-level counters unless restricted by E1H-MF mode */
9920 if (!IS_E1HMF_MODE_STAT(bp))
9921 num_stats += BNX2X_NUM_STATS;
9923 if (IS_E1HMF_MODE_STAT(bp)) {
/* multi-function mode: count only the function-level stats */
9925 for (i = 0; i < BNX2X_NUM_STATS; i++)
9926 if (IS_FUNC_STAT(i))
9929 num_stats = BNX2X_NUM_STATS;
/* self-test string set */
9934 return BNX2X_NUM_TESTS;
/*
 * ethtool .get_strings callback: fill buf with ETH_GSTRING_LEN-sized
 * names matching the counters reported by bnx2x_get_ethtool_stats.
 * Layout must mirror bnx2x_get_sset_count exactly.
 * NOTE(review): switch case labels and some braces are elided here.
 */
9941 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9943 struct bnx2x *bp = netdev_priv(dev);
9946 switch (stringset) {
/* per-queue names: the "[%d]" in each template gets the queue index */
9950 for_each_queue(bp, i) {
9951 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9952 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9953 bnx2x_q_stats_arr[j].string, i);
9954 k += BNX2X_NUM_Q_STATS;
/* device-level names are suppressed in E1H-MF mode */
9956 if (IS_E1HMF_MODE_STAT(bp))
9958 for (j = 0; j < BNX2X_NUM_STATS; j++)
9959 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9960 bnx2x_stats_arr[j].string);
/* single-queue path: copy names, skipping port stats in E1H-MF mode */
9962 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9963 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9965 strcpy(buf + j*ETH_GSTRING_LEN,
9966 bnx2x_stats_arr[i].string);
/* self-test names are a fixed table */
9973 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
/*
 * ethtool .get_ethtool_stats callback: copy HW/SW counters into buf as
 * u64s, in the same order as the strings emitted by bnx2x_get_strings.
 * 4-byte counters are widened; 8-byte counters are assembled from the
 * hi/lo dword pair via HILO_U64.  size == 0 entries are placeholders.
 * NOTE(review): branch structure (multi- vs single-queue) and several
 * braces/continue statements are elided in this excerpt.
 */
9978 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9979 struct ethtool_stats *stats, u64 *buf)
9981 struct bnx2x *bp = netdev_priv(dev);
9982 u32 *hw_stats, *offset;
/* per-queue counters first */
9987 for_each_queue(bp, i) {
9988 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9989 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9990 if (bnx2x_q_stats_arr[j].size == 0) {
9991 /* skip this counter */
9995 offset = (hw_stats +
9996 bnx2x_q_stats_arr[j].offset);
9997 if (bnx2x_q_stats_arr[j].size == 4) {
9998 /* 4-byte counter */
9999 buf[k + j] = (u64) *offset;
10002 /* 8-byte counter */
10003 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10005 k += BNX2X_NUM_Q_STATS;
/* device-level counters, unless suppressed in E1H-MF mode */
10007 if (IS_E1HMF_MODE_STAT(bp))
10009 hw_stats = (u32 *)&bp->eth_stats;
10010 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10011 if (bnx2x_stats_arr[j].size == 0) {
10012 /* skip this counter */
10016 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10017 if (bnx2x_stats_arr[j].size == 4) {
10018 /* 4-byte counter */
10019 buf[k + j] = (u64) *offset;
10022 /* 8-byte counter */
10023 buf[k + j] = HILO_U64(*offset, *(offset + 1));
/* single-queue path: device stats only, skipping port stats in MF mode */
10026 hw_stats = (u32 *)&bp->eth_stats;
10027 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10028 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10030 if (bnx2x_stats_arr[i].size == 0) {
10031 /* skip this counter */
10036 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10037 if (bnx2x_stats_arr[i].size == 4) {
10038 /* 4-byte counter */
10039 buf[j] = (u64) *offset;
10043 /* 8-byte counter */
10044 buf[j] = HILO_U64(*offset, *(offset + 1));
/*
 * ethtool .phys_id callback: blink the port LED so the user can locate
 * the physical NIC.  data is the number of seconds to blink; each loop
 * iteration toggles the LED state and sleeps 500ms, so (data * 2)
 * iterations equals data seconds.  Afterwards the LED is restored to
 * reflect the current link state.
 * NOTE(review): the data==0 default handling and some braces are elided.
 */
10050 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10052 struct bnx2x *bp = netdev_priv(dev);
/* LED control only makes sense on a running interface */
10055 if (!netif_running(dev))
10064 for (i = 0; i < (data * 2); i++) {
10066 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10069 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
/* allow the user to interrupt a long blink with a signal */
10071 msleep_interruptible(500);
10072 if (signal_pending(current))
/* restore the LED to the operational state for the current speed */
10076 if (bp->link_vars.link_up)
10077 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10078 bp->link_vars.line_speed);
/* ethtool operations table wired into the netdev in bnx2x_init_dev(). */
10083 static const struct ethtool_ops bnx2x_ethtool_ops = {
10084 .get_settings = bnx2x_get_settings,
10085 .set_settings = bnx2x_set_settings,
10086 .get_drvinfo = bnx2x_get_drvinfo,
10087 .get_regs_len = bnx2x_get_regs_len,
10088 .get_regs = bnx2x_get_regs,
10089 .get_wol = bnx2x_get_wol,
10090 .set_wol = bnx2x_set_wol,
10091 .get_msglevel = bnx2x_get_msglevel,
10092 .set_msglevel = bnx2x_set_msglevel,
10093 .nway_reset = bnx2x_nway_reset,
10094 .get_link = bnx2x_get_link,
10095 .get_eeprom_len = bnx2x_get_eeprom_len,
10096 .get_eeprom = bnx2x_get_eeprom,
10097 .set_eeprom = bnx2x_set_eeprom,
10098 .get_coalesce = bnx2x_get_coalesce,
10099 .set_coalesce = bnx2x_set_coalesce,
10100 .get_ringparam = bnx2x_get_ringparam,
10101 .set_ringparam = bnx2x_set_ringparam,
10102 .get_pauseparam = bnx2x_get_pauseparam,
10103 .set_pauseparam = bnx2x_set_pauseparam,
10104 .get_rx_csum = bnx2x_get_rx_csum,
10105 .set_rx_csum = bnx2x_set_rx_csum,
10106 .get_tx_csum = ethtool_op_get_tx_csum,
10107 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10108 .set_flags = bnx2x_set_flags,
10109 .get_flags = ethtool_op_get_flags,
10110 .get_sg = ethtool_op_get_sg,
10111 .set_sg = ethtool_op_set_sg,
10112 .get_tso = ethtool_op_get_tso,
10113 .set_tso = bnx2x_set_tso,
10114 .self_test = bnx2x_self_test,
10115 .get_sset_count = bnx2x_get_sset_count,
10116 .get_strings = bnx2x_get_strings,
10117 .phys_id = bnx2x_phys_id,
10118 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10121 /* end of ethtool_ops */
10124 /* ndo_open callback -- called with rtnl_lock held */
10125 static int bnx2x_open(struct net_device *dev)
10127 struct bnx2x *bp = netdev_priv(dev);
/* carrier stays off until the link comes up during/after nic_load */
10129 netif_carrier_off(dev);
10131 bnx2x_set_power_state(bp, PCI_D0);
/* a previous parity-error recovery may still be in progress */
10133 if (!bnx2x_reset_is_done(bp)) {
10135 /* Reset the MCP mailbox sequence if there is an ongoing
10140 /* If this is the first function to load and "reset done"
10141 * is still not set, recovery may have been abandoned.  The
10142 * attention state is not checked here because it may have
10143 * already been cleared by a "common" reset, but we shall
10144 * proceed with "process kill" anyway.
10146 if ((bnx2x_get_load_cnt(bp) == 0) &&
10147 bnx2x_trylock_hw_lock(bp,
10148 HW_LOCK_RESOURCE_RESERVED_08) &&
10149 (!bnx2x_leader_reset(bp))) {
10150 DP(NETIF_MSG_HW, "Recovered in open\n");
/* recovery not possible from here: drop power and bail out */
10154 bnx2x_set_power_state(bp, PCI_D3hot);
10156 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
10157 " completed yet. Try again later. If u still see this"
10158 " message after a few retries then power cycle is"
10159 " required.\n", bp->dev->name);
10165 bp->recovery_state = BNX2X_RECOVERY_DONE;
10167 return bnx2x_nic_load(bp, LOAD_OPEN);
10170 /* called with rtnl_lock */
10171 static int bnx2x_close(struct net_device *dev)
10173 struct bnx2x *bp = netdev_priv(dev);
10175 /* Unload the driver, release IRQs */
10176 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10177 bnx2x_set_power_state(bp, PCI_D3hot);
10182 /* ndo_set_multicast_list: called with netif_tx_lock from dev_mcast.c.
 * Translates dev->flags and the multicast list into the chip's RX mode
 * (normal / all-multi / promiscuous) and programs the MC filters:
 * E1 chips use CAM entries via a SET_MAC ramrod, later chips use a
 * 256-bit CRC hash filter in the NIG registers. */
10183 void bnx2x_set_rx_mode(struct net_device *dev)
10185 struct bnx2x *bp = netdev_priv(dev);
10186 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10187 int port = BP_PORT(bp);
/* RX mode can only be applied to a fully started device */
10189 if (bp->state != BNX2X_STATE_OPEN) {
10190 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10194 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10196 if (dev->flags & IFF_PROMISC)
10197 rx_mode = BNX2X_RX_MODE_PROMISC;
/* fall back to all-multi when the list exceeds the HW CAM capacity */
10199 else if ((dev->flags & IFF_ALLMULTI) ||
10200 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
10202 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10204 else { /* some multicasts */
10205 if (CHIP_IS_E1(bp)) {
10206 int i, old, offset;
10207 struct netdev_hw_addr *ha;
10208 struct mac_configuration_cmd *config =
10209 bnx2x_sp(bp, mcast_config);
/* fill one CAM entry per multicast address; the MAC is stored
 * as three byte-swapped 16-bit words */
10212 netdev_for_each_mc_addr(ha, dev) {
10213 config->config_table[i].
10214 cam_entry.msb_mac_addr =
10215 swab16(*(u16 *)&ha->addr[0]);
10216 config->config_table[i].
10217 cam_entry.middle_mac_addr =
10218 swab16(*(u16 *)&ha->addr[2]);
10219 config->config_table[i].
10220 cam_entry.lsb_mac_addr =
10221 swab16(*(u16 *)&ha->addr[4]);
10222 config->config_table[i].cam_entry.flags =
10224 config->config_table[i].
10225 target_table_entry.flags = 0;
10226 config->config_table[i].target_table_entry.
10227 clients_bit_vector =
10228 cpu_to_le32(1 << BP_L_ID(bp));
10229 config->config_table[i].
10230 target_table_entry.vlan_id = 0;
10233 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10234 config->config_table[i].
10235 cam_entry.msb_mac_addr,
10236 config->config_table[i].
10237 cam_entry.middle_mac_addr,
10238 config->config_table[i].
10239 cam_entry.lsb_mac_addr);
/* invalidate CAM entries left over from a longer previous list */
10242 old = config->hdr.length;
10244 for (; i < old; i++) {
10245 if (CAM_IS_INVALID(config->
10246 config_table[i])) {
10247 /* already invalidated */
10251 CAM_INVALIDATE(config->
/* CAM region base depends on port and on emulation vs silicon */
10256 if (CHIP_REV_IS_SLOW(bp))
10257 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10259 offset = BNX2X_MAX_MULTICAST*(1 + port);
10261 config->hdr.length = i;
10262 config->hdr.offset = offset;
10263 config->hdr.client_id = bp->fp->cl_id;
10264 config->hdr.reserved1 = 0;
10266 bp->set_mac_pending++;
/* hand the CAM update to FW via a slow-path SET_MAC ramrod */
10269 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10270 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10271 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10274 /* Accept one or more multicasts */
10275 struct netdev_hw_addr *ha;
10276 u32 mc_filter[MC_HASH_SIZE];
10277 u32 crc, bit, regidx;
10280 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
/* hash each address into the 256-bit filter: top CRC byte selects
 * the bit; bits are spread over MC_HASH_SIZE 32-bit registers */
10282 netdev_for_each_mc_addr(ha, dev) {
10283 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10286 crc = crc32c_le(0, ha->addr, ETH_ALEN);
10287 bit = (crc >> 24) & 0xff;
10290 mc_filter[regidx] |= (1 << bit);
10293 for (i = 0; i < MC_HASH_SIZE; i++)
10294 REG_WR(bp, MC_HASH_OFFSET(bp, i),
/* finally push the chosen RX mode down to the storm firmware */
10299 bp->rx_mode = rx_mode;
10300 bnx2x_set_storm_rx_mode(bp);
10304 /* mdio_read callback for the mdio_if_info -- called with rtnl_lock.
 * Performs a clause-45 read on the external PHY owned by this port.
 * NOTE(review): the tail of the function (propagating rc/value to the
 * caller) is elided in this excerpt. */
10305 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
10306 int devad, u16 addr)
10308 struct bnx2x *bp = netdev_priv(netdev);
10311 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
10313 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
10314 prtad, devad, addr);
/* only the PHY address this port owns may be accessed */
10316 if (prtad != bp->mdio.prtad) {
10317 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
10318 prtad, bp->mdio.prtad);
10322 /* The HW expects different devad if CL22 is used */
10323 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
/* serialize PHY access against the link-management code */
10325 bnx2x_acquire_phy_lock(bp);
10326 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
10327 devad, addr, &value);
10328 bnx2x_release_phy_lock(bp);
10329 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
10336 /* called with rtnl_lock */
10337 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
10338 u16 addr, u16 value)
10340 struct bnx2x *bp = netdev_priv(netdev);
10341 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
10344 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
10345 " value 0x%x\n", prtad, devad, addr, value);
10347 if (prtad != bp->mdio.prtad) {
10348 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
10349 prtad, bp->mdio.prtad);
10353 /* The HW expects different devad if CL22 is used */
10354 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
10356 bnx2x_acquire_phy_lock(bp);
10357 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
10358 devad, addr, value);
10359 bnx2x_release_phy_lock(bp);
10363 /* called with rtnl_lock */
10364 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10366 struct bnx2x *bp = netdev_priv(dev);
10367 struct mii_ioctl_data *mdio = if_mii(ifr);
10369 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
10370 mdio->phy_id, mdio->reg_num, mdio->val_in);
10372 if (!netif_running(dev))
10375 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry: run the interrupt handler with the line masked so it
 * can be used from contexts where interrupts are unavailable. */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int irq = bp->pdev->irq;

	disable_irq(irq);
	bnx2x_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
/* net_device operations table wired into the netdev in bnx2x_init_dev(). */
10389 static const struct net_device_ops bnx2x_netdev_ops = {
10390 .ndo_open = bnx2x_open,
10391 .ndo_stop = bnx2x_close,
10392 .ndo_start_xmit = bnx2x_start_xmit,
10393 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10394 .ndo_set_mac_address = bnx2x_change_mac_addr,
10395 .ndo_validate_addr = eth_validate_addr,
10396 .ndo_do_ioctl = bnx2x_ioctl,
10397 .ndo_change_mtu = bnx2x_change_mtu,
10398 .ndo_tx_timeout = bnx2x_tx_timeout,
/* NOTE(review): vlan_rx_register is presumably guarded by #ifdef BCM_VLAN
 * in the full source -- the guard lines are elided in this excerpt */
10400 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10402 #ifdef CONFIG_NET_POLL_CONTROLLER
10403 .ndo_poll_controller = poll_bnx2x,
/*
 * One-time PCI/netdev setup for a newly probed device: enable the PCI
 * function, claim and map BAR0 (registers) and BAR2 (doorbells), verify
 * PM and PCIe capabilities, configure DMA masks, and populate the
 * net_device ops/feature flags.  Unwinds via the err_out_* labels.
 * NOTE(review): several lines (goto targets, some braces and returns)
 * are elided in this excerpt.
 */
10407 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10408 struct net_device *dev)
10413 SET_NETDEV_DEV(dev, &pdev->dev);
10414 bp = netdev_priv(dev);
10419 bp->func = PCI_FUNC(pdev->devfn);
10421 rc = pci_enable_device(pdev);
10423 dev_err(&bp->pdev->dev,
10424 "Cannot enable PCI device, aborting\n");
/* BAR0 must be a memory BAR (chip registers) */
10428 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10429 dev_err(&bp->pdev->dev,
10430 "Cannot find PCI device base address, aborting\n");
10432 goto err_out_disable;
/* BAR2 must be a memory BAR (doorbell space) */
10435 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10436 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
10437 " base address, aborting\n");
10439 goto err_out_disable;
/* regions are shared between functions: request only on first enable */
10442 if (atomic_read(&pdev->enable_cnt) == 1) {
10443 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10445 dev_err(&bp->pdev->dev,
10446 "Cannot obtain PCI resources, aborting\n");
10447 goto err_out_disable;
10450 pci_set_master(pdev);
10451 pci_save_state(pdev);
10454 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10455 if (bp->pm_cap == 0) {
10456 dev_err(&bp->pdev->dev,
10457 "Cannot find power management capability, aborting\n");
10459 goto err_out_release;
10462 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10463 if (bp->pcie_cap == 0) {
10464 dev_err(&bp->pdev->dev,
10465 "Cannot find PCI Express capability, aborting\n");
10467 goto err_out_release;
/* prefer 64-bit DMA; fall back to 32-bit, else fail the probe */
10470 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
10471 bp->flags |= USING_DAC_FLAG;
10472 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
10473 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
10474 " failed, aborting\n");
10476 goto err_out_release;
10479 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
10480 dev_err(&bp->pdev->dev,
10481 "System does not support DMA, aborting\n");
10483 goto err_out_release;
10486 dev->mem_start = pci_resource_start(pdev, 0);
10487 dev->base_addr = dev->mem_start;
10488 dev->mem_end = pci_resource_end(pdev, 0);
10490 dev->irq = pdev->irq;
10492 bp->regview = pci_ioremap_bar(pdev, 0);
10493 if (!bp->regview) {
10494 dev_err(&bp->pdev->dev,
10495 "Cannot map register space, aborting\n");
10497 goto err_out_release;
/* map at most BNX2X_DB_SIZE of the doorbell BAR */
10500 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10501 min_t(u64, BNX2X_DB_SIZE,
10502 pci_resource_len(pdev, 2)));
10503 if (!bp->doorbells) {
10504 dev_err(&bp->pdev->dev,
10505 "Cannot map doorbell space, aborting\n");
10507 goto err_out_unmap;
10510 bnx2x_set_power_state(bp, PCI_D0);
10512 /* clean indirect addresses */
10513 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10514 PCICFG_VENDOR_ID_OFFSET);
10515 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10516 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10517 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10518 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10520 /* Reset the load counter */
10521 bnx2x_clear_load_cnt(bp);
10523 dev->watchdog_timeo = TX_TIMEOUT;
10525 dev->netdev_ops = &bnx2x_netdev_ops;
10526 dev->ethtool_ops = &bnx2x_ethtool_ops;
10527 dev->features |= NETIF_F_SG;
10528 dev->features |= NETIF_F_HW_CSUM;
/* NETIF_F_HIGHDMA only when the 64-bit DMA mask was accepted */
10529 if (bp->flags & USING_DAC_FLAG)
10530 dev->features |= NETIF_F_HIGHDMA;
10531 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10532 dev->features |= NETIF_F_TSO6;
10534 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10535 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
/* mirror the offload features for VLAN devices stacked on top */
10537 dev->vlan_features |= NETIF_F_SG;
10538 dev->vlan_features |= NETIF_F_HW_CSUM;
10539 if (bp->flags & USING_DAC_FLAG)
10540 dev->vlan_features |= NETIF_F_HIGHDMA;
10541 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10542 dev->vlan_features |= NETIF_F_TSO6;
10545 /* get_port_hwinfo() will set prtad and mmds properly */
10546 bp->mdio.prtad = MDIO_PRTAD_NONE;
10548 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
10549 bp->mdio.dev = dev;
10550 bp->mdio.mdio_read = bnx2x_mdio_read;
10551 bp->mdio.mdio_write = bnx2x_mdio_write;
/* error unwind: unmap BARs, release regions, disable the device */
10557 iounmap(bp->regview);
10558 bp->regview = NULL;
10560 if (bp->doorbells) {
10561 iounmap(bp->doorbells);
10562 bp->doorbells = NULL;
10566 if (atomic_read(&pdev->enable_cnt) == 1)
10567 pci_release_regions(pdev);
10570 pci_disable_device(pdev);
10571 pci_set_drvdata(pdev, NULL);
10577 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
10578 int *width, int *speed)
10580 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10582 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10584 /* return value of 1=2.5GHz 2=5GHz */
10585 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10588 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
10590 const struct firmware *firmware = bp->firmware;
10591 struct bnx2x_fw_file_hdr *fw_hdr;
10592 struct bnx2x_fw_file_section *sections;
10593 u32 offset, len, num_ops;
10598 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
10601 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
10602 sections = (struct bnx2x_fw_file_section *)fw_hdr;
10604 /* Make sure none of the offsets and sizes make us read beyond
10605 * the end of the firmware data */
10606 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
10607 offset = be32_to_cpu(sections[i].offset);
10608 len = be32_to_cpu(sections[i].len);
10609 if (offset + len > firmware->size) {
10610 dev_err(&bp->pdev->dev,
10611 "Section %d length is out of bounds\n", i);
10616 /* Likewise for the init_ops offsets */
10617 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
10618 ops_offsets = (u16 *)(firmware->data + offset);
10619 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
10621 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
10622 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
10623 dev_err(&bp->pdev->dev,
10624 "Section offset %d is out of bounds\n", i);
10629 /* Check FW version */
10630 offset = be32_to_cpu(fw_hdr->fw_version.offset);
10631 fw_ver = firmware->data + offset;
10632 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
10633 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
10634 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
10635 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
10636 dev_err(&bp->pdev->dev,
10637 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
10638 fw_ver[0], fw_ver[1], fw_ver[2],
10639 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
10640 BCM_5710_FW_MINOR_VERSION,
10641 BCM_5710_FW_REVISION_VERSION,
10642 BCM_5710_FW_ENGINEERING_VERSION);
10649 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
10651 const __be32 *source = (const __be32 *)_source;
10652 u32 *target = (u32 *)_target;
10655 for (i = 0; i < n/4; i++)
10656 target[i] = be32_to_cpu(source[i]);
10660 Ops array is stored in the following format:
10661 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
10663 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
10665 const __be32 *source = (const __be32 *)_source;
10666 struct raw_op *target = (struct raw_op *)_target;
10669 for (i = 0, j = 0; i < n/8; i++, j += 2) {
10670 tmp = be32_to_cpu(source[j]);
10671 target[i].op = (tmp >> 24) & 0xff;
10672 target[i].offset = tmp & 0xffffff;
10673 target[i].raw_data = be32_to_cpu(source[j + 1]);
10677 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
10679 const __be16 *source = (const __be16 *)_source;
10680 u16 *target = (u16 *)_target;
10683 for (i = 0; i < n/2; i++)
10684 target[i] = be16_to_cpu(source[i]);
/* Allocate bp->arr sized from the firmware header, then decode the
 * corresponding firmware section into it with the given conversion
 * function; jumps to lbl on allocation failure.  Relies on fw_hdr and
 * bp being in scope at the expansion site.
 * NOTE(review): some continuation lines of this macro (the do/while
 * wrapper and the NULL check) appear to be elided in this excerpt. */
10687 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
10689 u32 len = be32_to_cpu(fw_hdr->arr.len); \
10690 bp->arr = kmalloc(len, GFP_KERNEL); \
10692 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
10695 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
10696 (u8 *)bp->arr, len); \
/*
 * Load and unpack the chip firmware: pick the file for the chip
 * revision, request it from userspace, validate it, decode the init
 * data/ops/offsets arrays into freshly allocated buffers, and point
 * the per-STORM INIT_* accessors at the in-file sections.
 * Unwinds allocations in reverse order on failure.
 */
10699 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
10701 const char *fw_file_name;
10702 struct bnx2x_fw_file_hdr *fw_hdr;
/* firmware image differs between E1 and E1H silicon */
10705 if (CHIP_IS_E1(bp))
10706 fw_file_name = FW_FILE_NAME_E1;
10707 else if (CHIP_IS_E1H(bp))
10708 fw_file_name = FW_FILE_NAME_E1H;
10710 dev_err(dev, "Unsupported chip revision\n");
10714 dev_info(dev, "Loading %s\n", fw_file_name);
10716 rc = request_firmware(&bp->firmware, fw_file_name, dev);
10718 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
10719 goto request_firmware_exit;
/* reject images with inconsistent section tables or wrong version */
10722 rc = bnx2x_check_firmware(bp);
10724 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
10725 goto request_firmware_exit;
10728 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
10730 /* Initialize the pointers to the init arrays */
10732 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
10735 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
10738 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
10741 /* STORMs firmware */
10742 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
10743 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
10744 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
10745 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
10746 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
10747 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
10748 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
10749 be32_to_cpu(fw_hdr->usem_pram_data.offset);
10750 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
10751 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
10752 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
10753 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
10754 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
10755 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
10756 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
10757 be32_to_cpu(fw_hdr->csem_pram_data.offset);
/* error unwind: free decoded arrays, then release the firmware */
10761 init_offsets_alloc_err:
10762 kfree(bp->init_ops);
10763 init_ops_alloc_err:
10764 kfree(bp->init_data);
10765 request_firmware_exit:
10766 release_firmware(bp->firmware);
/*
 * PCI probe entry point: allocate the multi-queue netdev, run the
 * PCI/BAR setup (bnx2x_init_dev), driver-private init (bnx2x_init_bp),
 * load the firmware, register the netdev and print a banner with the
 * detected board, chip revision and PCIe link parameters.
 * NOTE(review): some error-unwind labels/returns are elided here.
 */
10772 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10773 const struct pci_device_id *ent)
10775 struct net_device *dev = NULL;
10777 int pcie_width, pcie_speed;
10780 /* dev zeroed in init_etherdev */
10781 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
10783 dev_err(&pdev->dev, "Cannot allocate net device\n");
10787 bp = netdev_priv(dev);
10788 bp->msg_enable = debug;
10790 pci_set_drvdata(pdev, dev);
10792 rc = bnx2x_init_dev(pdev, dev);
10798 rc = bnx2x_init_bp(bp);
10800 goto init_one_exit;
10802 /* Set init arrays */
10803 rc = bnx2x_init_firmware(bp, &pdev->dev);
10805 dev_err(&pdev->dev, "Error loading firmware\n");
10806 goto init_one_exit;
10809 rc = register_netdev(dev);
10811 dev_err(&pdev->dev, "Cannot register net device\n");
10812 goto init_one_exit;
/* probe banner: board name, silicon rev letter/metal, PCIe link, MAC */
10815 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
10816 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
10817 " IRQ %d, ", board_info[ent->driver_data].name,
10818 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10819 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
10820 dev->base_addr, bp->pdev->irq);
10821 pr_cont("node addr %pM\n", dev->dev_addr);
/* error unwind: unmap BARs, release regions, disable the device */
10827 iounmap(bp->regview);
10830 iounmap(bp->doorbells);
10834 if (atomic_read(&pdev->enable_cnt) == 1)
10835 pci_release_regions(pdev);
10837 pci_disable_device(pdev);
10838 pci_set_drvdata(pdev, NULL);
/*
 * PCI remove entry point: unregister the netdev, cancel pending reset
 * work, free the decoded firmware arrays, release the firmware image,
 * unmap BARs and hand the PCI resources back.  Mirrors the unwind path
 * of bnx2x_init_one.
 */
10843 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10845 struct net_device *dev = pci_get_drvdata(pdev);
10849 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
10852 bp = netdev_priv(dev);
10854 unregister_netdev(dev);
10856 /* Make sure RESET task is not scheduled before continuing */
10857 cancel_delayed_work_sync(&bp->reset_task);
/* free in reverse order of bnx2x_init_firmware allocation */
10859 kfree(bp->init_ops_offsets);
10860 kfree(bp->init_ops);
10861 kfree(bp->init_data);
10862 release_firmware(bp->firmware);
10865 iounmap(bp->regview);
10868 iounmap(bp->doorbells);
/* regions are shared between functions: release only on last disable */
10872 if (atomic_read(&pdev->enable_cnt) == 1)
10873 pci_release_regions(pdev);
10875 pci_disable_device(pdev);
10876 pci_set_drvdata(pdev, NULL);
/*
 * Minimal NIC teardown used from the PCI error (EEH/AER) path: the
 * device may be inaccessible, so no HW commands are issued -- only
 * software state, timers, IRQs and memory are cleaned up.
 */
10879 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10883 bp->state = BNX2X_STATE_ERROR;
10885 bp->rx_mode = BNX2X_RX_MODE_NONE;
10887 bnx2x_netif_stop(bp, 0);
10888 netif_carrier_off(bp->dev);
10890 del_timer_sync(&bp->timer);
10891 bp->stats_state = STATS_STATE_DISABLED;
10892 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10895 bnx2x_free_irq(bp, false);
/* mark all E1 CAM entries invalid so a later load starts clean */
10897 if (CHIP_IS_E1(bp)) {
10898 struct mac_configuration_cmd *config =
10899 bnx2x_sp(bp, mcast_config);
10901 for (i = 0; i < config->hdr.length; i++)
10902 CAM_INVALIDATE(config->config_table[i]);
10905 /* Free SKBs, SGEs, TPA pool and driver internals */
10906 bnx2x_free_skbs(bp);
10907 for_each_queue(bp, i)
10908 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10909 for_each_queue(bp, i)
10910 netif_napi_del(&bnx2x_fp(bp, i, napi));
10911 bnx2x_free_mem(bp);
10913 bp->state = BNX2X_STATE_CLOSED;
/*
 * Re-establish communication with the management CPU (MCP) after a PCI
 * error recovery: re-read the shared-memory base, sanity-check its
 * range and validity signature, and resync the driver/firmware mailbox
 * sequence number.
 */
10918 static void bnx2x_eeh_recover(struct bnx2x *bp)
10922 mutex_init(&bp->port.phy_mutex);
10924 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10925 bp->link_params.shmem_base = bp->common.shmem_base;
10926 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
/* shmem outside the expected window means the MCP is not running */
10928 if (!bp->common.shmem_base ||
10929 (bp->common.shmem_base < 0xA0000) ||
10930 (bp->common.shmem_base >= 0xC0000)) {
10931 BNX2X_DEV_INFO("MCP not active\n");
10932 bp->flags |= NO_MCP_FLAG;
10936 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10937 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10938 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10939 BNX2X_ERR("BAD MCP validity signature\n");
/* pick up the current mailbox sequence so the next command is in sync */
10941 if (!BP_NOMCP(bp)) {
10942 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10943 & DRV_MSG_SEQ_NUMBER_MASK);
10944 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10949 * bnx2x_io_error_detected - called when PCI error is detected
10950 * @pdev: Pointer to PCI device
10951 * @state: The current pci connection state
10953 * This function is called after a PCI bus error affecting
10954 * this device has been detected.
10956 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10957 pci_channel_state_t state)
10959 struct net_device *dev = pci_get_drvdata(pdev);
10960 struct bnx2x *bp = netdev_priv(dev);
10964 netif_device_detach(dev);
/* permanent failure: no recovery is possible, tell the core to give up */
10966 if (state == pci_channel_io_perm_failure) {
10968 return PCI_ERS_RESULT_DISCONNECT;
/* soft teardown only -- the device may already be unreachable */
10971 if (netif_running(dev))
10972 bnx2x_eeh_nic_unload(bp)
10974 pci_disable_device(pdev);
10978 /* Request a slot reset */
10979 return PCI_ERS_RESULT_NEED_RESET;
10983 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10984 * @pdev: Pointer to PCI device
10986 * Restart the card from scratch, as if from a cold-boot.
10988 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10990 struct net_device *dev = pci_get_drvdata(pdev);
10991 struct bnx2x *bp = netdev_priv(dev);
10995 if (pci_enable_device(pdev)) {
10996 dev_err(&pdev->dev,
10997 "Cannot re-enable PCI device after reset\n");
10999 return PCI_ERS_RESULT_DISCONNECT;
/* restore the config space saved in bnx2x_init_dev */
11002 pci_set_master(pdev);
11003 pci_restore_state(pdev);
11005 if (netif_running(dev))
11006 bnx2x_set_power_state(bp, PCI_D0);
11010 return PCI_ERS_RESULT_RECOVERED;
11014 * bnx2x_io_resume - called when traffic can start flowing again
11015 * @pdev: Pointer to PCI device
11017 * This callback is called when the error recovery driver tells us that
11018 * its OK to resume normal operation.
11020 static void bnx2x_io_resume(struct pci_dev *pdev)
11022 struct net_device *dev = pci_get_drvdata(pdev);
11023 struct bnx2x *bp = netdev_priv(dev);
/* a parity-error recovery must finish before the device is reloaded */
11025 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11026 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
/* resync with the MCP, then bring the NIC back up if it was running */
11032 bnx2x_eeh_recover(bp);
11034 if (netif_running(dev))
11035 bnx2x_nic_load(bp, LOAD_NORMAL);
11037 netif_device_attach(dev);
/* PCI error (AER/EEH) recovery callbacks, referenced by bnx2x_pci_driver. */
11042 static struct pci_error_handlers bnx2x_err_handler = {
11043 .error_detected = bnx2x_io_error_detected,
11044 .slot_reset = bnx2x_io_slot_reset,
11045 .resume = bnx2x_io_resume,
/* PCI driver descriptor registered in bnx2x_init(). */
11048 static struct pci_driver bnx2x_pci_driver = {
11049 .name = DRV_MODULE_NAME,
11050 .id_table = bnx2x_pci_tbl,
11051 .probe = bnx2x_init_one,
11052 .remove = __devexit_p(bnx2x_remove_one),
11053 .suspend = bnx2x_suspend,
11054 .resume = bnx2x_resume,
11055 .err_handler = &bnx2x_err_handler,
11058 static int __init bnx2x_init(void)
11062 pr_info("%s", version);
11064 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11065 if (bnx2x_wq == NULL) {
11066 pr_err("Cannot create workqueue\n");
11070 ret = pci_register_driver(&bnx2x_pci_driver);
11072 pr_err("Cannot register driver\n");
11073 destroy_workqueue(bnx2x_wq);
11078 static void __exit bnx2x_cleanup(void)
11080 pci_unregister_driver(&bnx2x_pci_driver);
11082 destroy_workqueue(bnx2x_wq);
11085 module_init(bnx2x_init);
11086 module_exit(bnx2x_cleanup);
11090 /* count denotes the number of new completions we have seen */
11091 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
11093 struct eth_spe *spe;
11095 #ifdef BNX2X_STOP_ON_ERROR
11096 if (unlikely(bp->panic))
11100 spin_lock_bh(&bp->spq_lock);
11101 bp->cnic_spq_pending -= count;
11103 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
11104 bp->cnic_spq_pending++) {
11106 if (!bp->cnic_kwq_pending)
11109 spe = bnx2x_sp_get_next(bp);
11110 *spe = *bp->cnic_kwq_cons;
11112 bp->cnic_kwq_pending--;
11114 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
11115 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
11117 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
11118 bp->cnic_kwq_cons = bp->cnic_kwq;
11120 bp->cnic_kwq_cons++;
11122 bnx2x_sp_prod_update(bp);
11123 spin_unlock_bh(&bp->spq_lock);
11126 static int bnx2x_cnic_sp_queue(struct net_device *dev,
11127 struct kwqe_16 *kwqes[], u32 count)
11129 struct bnx2x *bp = netdev_priv(dev);
11132 #ifdef BNX2X_STOP_ON_ERROR
11133 if (unlikely(bp->panic))
11137 spin_lock_bh(&bp->spq_lock);
11139 for (i = 0; i < count; i++) {
11140 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
11142 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
11145 *bp->cnic_kwq_prod = *spe;
11147 bp->cnic_kwq_pending++;
11149 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
11150 spe->hdr.conn_and_cmd_data, spe->hdr.type,
11151 spe->data.mac_config_addr.hi,
11152 spe->data.mac_config_addr.lo,
11153 bp->cnic_kwq_pending);
11155 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
11156 bp->cnic_kwq_prod = bp->cnic_kwq;
11158 bp->cnic_kwq_prod++;
11161 spin_unlock_bh(&bp->spq_lock);
11163 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
11164 bnx2x_cnic_sp_post(bp, 0);
11169 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
11171 struct cnic_ops *c_ops;
11174 mutex_lock(&bp->cnic_mutex);
11175 c_ops = bp->cnic_ops;
11177 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
11178 mutex_unlock(&bp->cnic_mutex);
11183 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
11185 struct cnic_ops *c_ops;
11189 c_ops = rcu_dereference(bp->cnic_ops);
11191 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
11198 * for commands that have no data
11200 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
11202 struct cnic_ctl_info ctl = {0};
11206 return bnx2x_cnic_ctl_send(bp, &ctl);
11209 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
11211 struct cnic_ctl_info ctl;
11213 /* first we tell CNIC and only then we count this as a completion */
11214 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
11215 ctl.data.comp.cid = cid;
11217 bnx2x_cnic_ctl_send_bh(bp, &ctl);
11218 bnx2x_cnic_sp_post(bp, 1);
11221 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
11223 struct bnx2x *bp = netdev_priv(dev);
11226 switch (ctl->cmd) {
11227 case DRV_CTL_CTXTBL_WR_CMD: {
11228 u32 index = ctl->data.io.offset;
11229 dma_addr_t addr = ctl->data.io.dma_addr;
11231 bnx2x_ilt_wr(bp, index, addr);
11235 case DRV_CTL_COMPLETION_CMD: {
11236 int count = ctl->data.comp.comp_count;
11238 bnx2x_cnic_sp_post(bp, count);
11242 /* rtnl_lock is held. */
11243 case DRV_CTL_START_L2_CMD: {
11244 u32 cli = ctl->data.ring.client_id;
11246 bp->rx_mode_cl_mask |= (1 << cli);
11247 bnx2x_set_storm_rx_mode(bp);
11251 /* rtnl_lock is held. */
11252 case DRV_CTL_STOP_L2_CMD: {
11253 u32 cli = ctl->data.ring.client_id;
11255 bp->rx_mode_cl_mask &= ~(1 << cli);
11256 bnx2x_set_storm_rx_mode(bp);
11261 BNX2X_ERR("unknown command %x\n", ctl->cmd);
11268 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
11270 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
11272 if (bp->flags & USING_MSIX_FLAG) {
11273 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
11274 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
11275 cp->irq_arr[0].vector = bp->msix_table[1].vector;
11277 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
11278 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
11280 cp->irq_arr[0].status_blk = bp->cnic_sb;
11281 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
11282 cp->irq_arr[1].status_blk = bp->def_status_blk;
11283 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
11288 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
11291 struct bnx2x *bp = netdev_priv(dev);
11292 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
11297 if (atomic_read(&bp->intr_sem) != 0)
11300 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
11304 bp->cnic_kwq_cons = bp->cnic_kwq;
11305 bp->cnic_kwq_prod = bp->cnic_kwq;
11306 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
11308 bp->cnic_spq_pending = 0;
11309 bp->cnic_kwq_pending = 0;
11311 bp->cnic_data = data;
11314 cp->drv_state = CNIC_DRV_STATE_REGD;
11316 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
11318 bnx2x_setup_cnic_irq_info(bp);
11319 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
11320 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
11321 rcu_assign_pointer(bp->cnic_ops, ops);
11326 static int bnx2x_unregister_cnic(struct net_device *dev)
11328 struct bnx2x *bp = netdev_priv(dev);
11329 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
11331 mutex_lock(&bp->cnic_mutex);
11332 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
11333 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
11334 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
11337 rcu_assign_pointer(bp->cnic_ops, NULL);
11338 mutex_unlock(&bp->cnic_mutex);
11340 kfree(bp->cnic_kwq);
11341 bp->cnic_kwq = NULL;
11346 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
11348 struct bnx2x *bp = netdev_priv(dev);
11349 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
11351 cp->drv_owner = THIS_MODULE;
11352 cp->chip_id = CHIP_ID(bp);
11353 cp->pdev = bp->pdev;
11354 cp->io_base = bp->regview;
11355 cp->io_base2 = bp->doorbells;
11356 cp->max_kwqe_pending = 8;
11357 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
11358 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
11359 cp->ctx_tbl_len = CNIC_ILT_LINES;
11360 cp->starting_cid = BCM_CNIC_CID_START;
11361 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
11362 cp->drv_ctl = bnx2x_drv_ctl;
11363 cp->drv_register_cnic = bnx2x_register_cnic;
11364 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
11368 EXPORT_SYMBOL(bnx2x_cnic_probe);
11370 #endif /* BCM_CNIC */