1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
52 #include <linux/stringify.h>
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_cmn.h"
61 #include <linux/firmware.h>
62 #include "bnx2x_fw_file_hdr.h"
64 #define FW_FILE_VERSION \
65 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
66 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
67 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
68 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
69 #define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
70 #define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
72 /* Time in jiffies before concluding the transmitter is hung */
73 #define TX_TIMEOUT (5*HZ)
75 static char version[] __devinitdata =
76 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
77 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
79 MODULE_AUTHOR("Eliezer Tamir");
80 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
81 MODULE_LICENSE("GPL");
82 MODULE_VERSION(DRV_MODULE_VERSION);
83 MODULE_FIRMWARE(FW_FILE_NAME_E1);
84 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
86 static int multi_mode = 1;
87 module_param(multi_mode, int, 0);
88 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
89 "(0 Disable; 1 Enable (default))");
91 static int num_queues;
92 module_param(num_queues, int, 0);
93 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
94 " (default is as a number of CPUs)");
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
105 static int dropless_fc;
106 module_param(dropless_fc, int, 0);
107 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
110 module_param(poll, int, 0);
111 MODULE_PARM_DESC(poll, " Use polling (for debug)");
113 static int mrrs = -1;
114 module_param(mrrs, int, 0);
115 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118 module_param(debug, int, 0);
119 MODULE_PARM_DESC(debug, " Default debug msglevel");
121 static struct workqueue_struct *bnx2x_wq;
123 enum bnx2x_board_type {
129 /* indexed by board_type, above */
132 } board_info[] __devinitdata = {
133 { "Broadcom NetXtreme II BCM57710 XGb" },
134 { "Broadcom NetXtreme II BCM57711 XGb" },
135 { "Broadcom NetXtreme II BCM57711E XGb" }
139 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
140 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
146 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
148 /****************************************************************************
149 * General service functions
150 ****************************************************************************/
153 * locking is done by mcp
155 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
157 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
159 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
160 PCICFG_VENDOR_ID_OFFSET);
163 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
167 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
168 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
169 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
170 PCICFG_VENDOR_ID_OFFSET);
175 const u32 dmae_reg_go_c[] = {
176 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
177 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
178 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
179 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
182 /* copy command into DMAE command memory and set DMAE command go */
183 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
188 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
189 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
190 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
192 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
193 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
195 REG_WR(bp, dmae_reg_go_c[idx], 1);
198 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
201 struct dmae_command dmae;
202 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
205 if (!bp->dmae_ready) {
206 u32 *data = bnx2x_sp(bp, wb_data[0]);
208 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
209 " using indirect\n", dst_addr, len32);
210 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
214 memset(&dmae, 0, sizeof(struct dmae_command));
216 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
217 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
218 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
220 DMAE_CMD_ENDIANITY_B_DW_SWAP |
222 DMAE_CMD_ENDIANITY_DW_SWAP |
224 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
225 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
226 dmae.src_addr_lo = U64_LO(dma_addr);
227 dmae.src_addr_hi = U64_HI(dma_addr);
228 dmae.dst_addr_lo = dst_addr >> 2;
229 dmae.dst_addr_hi = 0;
231 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
232 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
233 dmae.comp_val = DMAE_COMP_VAL;
235 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
236 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
237 "dst_addr [%x:%08x (%08x)]\n"
238 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
239 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
240 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
241 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
242 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
243 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
244 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
246 mutex_lock(&bp->dmae_mutex);
250 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
254 while (*wb_comp != DMAE_COMP_VAL) {
255 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
258 BNX2X_ERR("DMAE timeout!\n");
262 /* adjust delay for emulation/FPGA */
263 if (CHIP_REV_IS_SLOW(bp))
269 mutex_unlock(&bp->dmae_mutex);
272 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
274 struct dmae_command dmae;
275 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
278 if (!bp->dmae_ready) {
279 u32 *data = bnx2x_sp(bp, wb_data[0]);
282 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
283 " using indirect\n", src_addr, len32);
284 for (i = 0; i < len32; i++)
285 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
289 memset(&dmae, 0, sizeof(struct dmae_command));
291 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
292 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
293 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
295 DMAE_CMD_ENDIANITY_B_DW_SWAP |
297 DMAE_CMD_ENDIANITY_DW_SWAP |
299 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
300 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
301 dmae.src_addr_lo = src_addr >> 2;
302 dmae.src_addr_hi = 0;
303 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
304 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
306 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
307 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
308 dmae.comp_val = DMAE_COMP_VAL;
310 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
311 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
312 "dst_addr [%x:%08x (%08x)]\n"
313 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
314 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
315 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
316 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
318 mutex_lock(&bp->dmae_mutex);
320 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
323 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
327 while (*wb_comp != DMAE_COMP_VAL) {
330 BNX2X_ERR("DMAE timeout!\n");
334 /* adjust delay for emulation/FPGA */
335 if (CHIP_REV_IS_SLOW(bp))
340 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
341 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
342 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
344 mutex_unlock(&bp->dmae_mutex);
347 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
350 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
353 while (len > dmae_wr_max) {
354 bnx2x_write_dmae(bp, phys_addr + offset,
355 addr + offset, dmae_wr_max);
356 offset += dmae_wr_max * 4;
360 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
363 /* used only for slowpath so not inlined */
364 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
368 wb_write[0] = val_hi;
369 wb_write[1] = val_lo;
370 REG_WR_DMAE(bp, reg, wb_write, 2);
374 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
378 REG_RD_DMAE(bp, reg, wb_data, 2);
380 return HILO_U64(wb_data[0], wb_data[1]);
384 static int bnx2x_mc_assert(struct bnx2x *bp)
388 u32 row0, row1, row2, row3;
391 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
392 XSTORM_ASSERT_LIST_INDEX_OFFSET);
394 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
396 /* print the asserts */
397 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
399 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
400 XSTORM_ASSERT_LIST_OFFSET(i));
401 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
402 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
403 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
404 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
405 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
408 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
409 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
410 " 0x%08x 0x%08x 0x%08x\n",
411 i, row3, row2, row1, row0);
419 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
420 TSTORM_ASSERT_LIST_INDEX_OFFSET);
422 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
424 /* print the asserts */
425 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
427 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
428 TSTORM_ASSERT_LIST_OFFSET(i));
429 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
430 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
431 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
432 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
433 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
436 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
437 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
438 " 0x%08x 0x%08x 0x%08x\n",
439 i, row3, row2, row1, row0);
447 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
448 CSTORM_ASSERT_LIST_INDEX_OFFSET);
450 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
452 /* print the asserts */
453 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
455 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
456 CSTORM_ASSERT_LIST_OFFSET(i));
457 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
458 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
459 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
460 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
461 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
464 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
465 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
466 " 0x%08x 0x%08x 0x%08x\n",
467 i, row3, row2, row1, row0);
475 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
476 USTORM_ASSERT_LIST_INDEX_OFFSET);
478 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
480 /* print the asserts */
481 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
483 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
484 USTORM_ASSERT_LIST_OFFSET(i));
485 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
486 USTORM_ASSERT_LIST_OFFSET(i) + 4);
487 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
488 USTORM_ASSERT_LIST_OFFSET(i) + 8);
489 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i) + 12);
492 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
493 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
494 " 0x%08x 0x%08x 0x%08x\n",
495 i, row3, row2, row1, row0);
505 static void bnx2x_fw_dump(struct bnx2x *bp)
513 BNX2X_ERR("NO MCP - can not dump\n");
517 addr = bp->common.shmem_base - 0x0800 + 4;
518 mark = REG_RD(bp, addr);
519 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
520 pr_err("begin fw dump (mark 0x%x)\n", mark);
523 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
524 for (word = 0; word < 8; word++)
525 data[word] = htonl(REG_RD(bp, offset + 4*word));
527 pr_cont("%s", (char *)data);
529 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
530 for (word = 0; word < 8; word++)
531 data[word] = htonl(REG_RD(bp, offset + 4*word));
533 pr_cont("%s", (char *)data);
535 pr_err("end of fw dump\n");
538 void bnx2x_panic_dump(struct bnx2x *bp)
543 bp->stats_state = STATS_STATE_DISABLED;
544 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
546 BNX2X_ERR("begin crash dump -----------------\n");
550 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
551 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
552 " spq_prod_idx(0x%x)\n",
553 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
554 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
557 for_each_queue(bp, i) {
558 struct bnx2x_fastpath *fp = &bp->fp[i];
560 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
561 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
562 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
563 i, fp->rx_bd_prod, fp->rx_bd_cons,
564 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
565 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
566 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
567 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
568 fp->rx_sge_prod, fp->last_max_sge,
569 le16_to_cpu(fp->fp_u_idx),
570 fp->status_blk->u_status_block.status_block_index);
574 for_each_queue(bp, i) {
575 struct bnx2x_fastpath *fp = &bp->fp[i];
577 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
578 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
579 " *tx_cons_sb(0x%x)\n",
580 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
581 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
582 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
583 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
584 fp->status_blk->c_status_block.status_block_index,
585 fp->tx_db.data.prod);
590 for_each_queue(bp, i) {
591 struct bnx2x_fastpath *fp = &bp->fp[i];
593 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
594 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
595 for (j = start; j != end; j = RX_BD(j + 1)) {
596 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
597 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
599 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
600 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
603 start = RX_SGE(fp->rx_sge_prod);
604 end = RX_SGE(fp->last_max_sge);
605 for (j = start; j != end; j = RX_SGE(j + 1)) {
606 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
607 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
609 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
610 i, j, rx_sge[1], rx_sge[0], sw_page->page);
613 start = RCQ_BD(fp->rx_comp_cons - 10);
614 end = RCQ_BD(fp->rx_comp_cons + 503);
615 for (j = start; j != end; j = RCQ_BD(j + 1)) {
616 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
618 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
619 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
624 for_each_queue(bp, i) {
625 struct bnx2x_fastpath *fp = &bp->fp[i];
627 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
628 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
629 for (j = start; j != end; j = TX_BD(j + 1)) {
630 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
632 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
633 i, j, sw_bd->skb, sw_bd->first_bd);
636 start = TX_BD(fp->tx_bd_cons - 10);
637 end = TX_BD(fp->tx_bd_cons + 254);
638 for (j = start; j != end; j = TX_BD(j + 1)) {
639 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
641 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
642 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
648 BNX2X_ERR("end crash dump -----------------\n");
651 void bnx2x_int_enable(struct bnx2x *bp)
653 int port = BP_PORT(bp);
654 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
655 u32 val = REG_RD(bp, addr);
656 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
657 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
660 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
661 HC_CONFIG_0_REG_INT_LINE_EN_0);
662 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
665 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
666 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
667 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
668 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
670 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
671 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
672 HC_CONFIG_0_REG_INT_LINE_EN_0 |
673 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
675 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
678 REG_WR(bp, addr, val);
680 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
683 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
684 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
686 REG_WR(bp, addr, val);
688 * Ensure that HC_CONFIG is written before leading/trailing edge config
693 if (CHIP_IS_E1H(bp)) {
694 /* init leading/trailing edge */
696 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
698 /* enable nig and gpio3 attention */
703 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
704 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
707 /* Make sure that interrupts are indeed enabled from here on */
711 static void bnx2x_int_disable(struct bnx2x *bp)
713 int port = BP_PORT(bp);
714 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
715 u32 val = REG_RD(bp, addr);
717 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
718 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
719 HC_CONFIG_0_REG_INT_LINE_EN_0 |
720 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
722 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
725 /* flush all outstanding writes */
728 REG_WR(bp, addr, val);
729 if (REG_RD(bp, addr) != val)
730 BNX2X_ERR("BUG! proper val not read from IGU!\n");
733 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
735 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
738 /* disable interrupt handling */
739 atomic_inc(&bp->intr_sem);
740 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
743 /* prevent the HW from sending interrupts */
744 bnx2x_int_disable(bp);
746 /* make sure all ISRs are done */
748 synchronize_irq(bp->msix_table[0].vector);
753 for_each_queue(bp, i)
754 synchronize_irq(bp->msix_table[i + offset].vector);
756 synchronize_irq(bp->pdev->irq);
758 /* make sure sp_task is not running */
759 cancel_delayed_work(&bp->sp_task);
760 flush_workqueue(bnx2x_wq);
766 * General service functions
769 /* Return true if succeeded to acquire the lock */
770 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
773 u32 resource_bit = (1 << resource);
774 int func = BP_FUNC(bp);
775 u32 hw_lock_control_reg;
777 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
779 /* Validating that the resource is within range */
780 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
782 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
783 resource, HW_LOCK_MAX_RESOURCE_VALUE);
788 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
790 hw_lock_control_reg =
791 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
793 /* Try to acquire the lock */
794 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
795 lock_status = REG_RD(bp, hw_lock_control_reg);
796 if (lock_status & resource_bit)
799 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
805 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
808 void bnx2x_sp_event(struct bnx2x_fastpath *fp,
809 union eth_rx_cqe *rr_cqe)
811 struct bnx2x *bp = fp->bp;
812 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
813 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
816 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
817 fp->index, cid, command, bp->state,
818 rr_cqe->ramrod_cqe.ramrod_type);
823 switch (command | fp->state) {
824 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
825 BNX2X_FP_STATE_OPENING):
826 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
828 fp->state = BNX2X_FP_STATE_OPEN;
831 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
832 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
834 fp->state = BNX2X_FP_STATE_HALTED;
838 BNX2X_ERR("unexpected MC reply (%d) "
839 "fp[%d] state is %x\n",
840 command, fp->index, fp->state);
843 mb(); /* force bnx2x_wait_ramrod() to see the change */
847 switch (command | bp->state) {
848 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
849 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
850 bp->state = BNX2X_STATE_OPEN;
853 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
854 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
855 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
856 fp->state = BNX2X_FP_STATE_HALTED;
859 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
860 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
861 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
865 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
866 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
867 bnx2x_cnic_cfc_comp(bp, cid);
871 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
872 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
873 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
874 bp->set_mac_pending--;
878 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
879 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
880 bp->set_mac_pending--;
885 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
889 mb(); /* force bnx2x_wait_ramrod() to see the change */
892 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
894 struct bnx2x *bp = netdev_priv(dev_instance);
895 u16 status = bnx2x_ack_int(bp);
899 /* Return here if interrupt is shared and it's not for us */
900 if (unlikely(status == 0)) {
901 DP(NETIF_MSG_INTR, "not our interrupt!\n");
904 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
906 /* Return here if interrupt is disabled */
907 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
908 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
912 #ifdef BNX2X_STOP_ON_ERROR
913 if (unlikely(bp->panic))
917 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
918 struct bnx2x_fastpath *fp = &bp->fp[i];
920 mask = 0x2 << fp->sb_id;
922 /* Handle Rx and Tx according to SB id */
923 prefetch(fp->rx_cons_sb);
924 prefetch(&fp->status_blk->u_status_block.
926 prefetch(fp->tx_cons_sb);
927 prefetch(&fp->status_blk->c_status_block.
929 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
935 mask = 0x2 << CNIC_SB_ID(bp);
936 if (status & (mask | 0x1)) {
937 struct cnic_ops *c_ops = NULL;
940 c_ops = rcu_dereference(bp->cnic_ops);
942 c_ops->cnic_handler(bp->cnic_data, NULL);
949 if (unlikely(status & 0x1)) {
950 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
957 if (unlikely(status))
958 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
964 /* end of fast path */
970 * General service functions
973 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
976 u32 resource_bit = (1 << resource);
977 int func = BP_FUNC(bp);
978 u32 hw_lock_control_reg;
981 /* Validating that the resource is within range */
982 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
984 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
985 resource, HW_LOCK_MAX_RESOURCE_VALUE);
990 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
992 hw_lock_control_reg =
993 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
996 /* Validating that the resource is not already taken */
997 lock_status = REG_RD(bp, hw_lock_control_reg);
998 if (lock_status & resource_bit) {
999 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1000 lock_status, resource_bit);
1004 /* Try for 5 second every 5ms */
1005 for (cnt = 0; cnt < 1000; cnt++) {
1006 /* Try to acquire the lock */
1007 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1008 lock_status = REG_RD(bp, hw_lock_control_reg);
1009 if (lock_status & resource_bit)
1014 DP(NETIF_MSG_HW, "Timeout\n");
1018 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1021 u32 resource_bit = (1 << resource);
1022 int func = BP_FUNC(bp);
1023 u32 hw_lock_control_reg;
1025 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1027 /* Validating that the resource is within range */
1028 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1030 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1031 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1036 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1038 hw_lock_control_reg =
1039 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1042 /* Validating that the resource is currently taken */
1043 lock_status = REG_RD(bp, hw_lock_control_reg);
1044 if (!(lock_status & resource_bit)) {
1045 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1046 lock_status, resource_bit);
1050 REG_WR(bp, hw_lock_control_reg, resource_bit);
1055 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1057 /* The GPIO should be swapped if swap register is set and active */
1058 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1059 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1060 int gpio_shift = gpio_num +
1061 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1062 u32 gpio_mask = (1 << gpio_shift);
1066 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1067 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1071 /* read GPIO value */
1072 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1074 /* get the requested pin value */
1075 if ((gpio_reg & gpio_mask) == gpio_mask)
1080 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1085 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1087 /* The GPIO should be swapped if swap register is set and active */
1088 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1089 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1090 int gpio_shift = gpio_num +
1091 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1092 u32 gpio_mask = (1 << gpio_shift);
1095 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1096 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1100 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1101 /* read GPIO and mask except the float bits */
1102 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1105 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1106 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1107 gpio_num, gpio_shift);
1108 /* clear FLOAT and set CLR */
1109 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1110 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1113 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1114 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1115 gpio_num, gpio_shift);
1116 /* clear FLOAT and set SET */
1117 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1118 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1121 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1122 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1123 gpio_num, gpio_shift);
1125 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1132 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1133 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1138 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1140 /* The GPIO should be swapped if swap register is set and active */
1141 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1142 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1143 int gpio_shift = gpio_num +
1144 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1145 u32 gpio_mask = (1 << gpio_shift);
1148 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1149 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1153 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1155 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1158 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1159 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1160 "output low\n", gpio_num, gpio_shift);
1161 /* clear SET and set CLR */
1162 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1163 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1166 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1167 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1168 "output high\n", gpio_num, gpio_shift);
1169 /* clear CLR and set SET */
1170 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1171 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1178 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1179 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1184 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1186 u32 spio_mask = (1 << spio_num);
1189 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1190 (spio_num > MISC_REGISTERS_SPIO_7)) {
1191 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1195 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1196 /* read SPIO and mask except the float bits */
1197 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1200 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1201 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1202 /* clear FLOAT and set CLR */
1203 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1204 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1207 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1208 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1209 /* clear FLOAT and set SET */
1210 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1211 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1214 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1215 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1217 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1224 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1225 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1230 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1232 switch (bp->link_vars.ieee_fc &
1233 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1234 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1235 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1239 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1240 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1244 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1245 bp->port.advertising |= ADVERTISED_Asym_Pause;
1249 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1256 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1258 if (!BP_NOMCP(bp)) {
1261 /* Initialize link parameters structure variables */
1262 /* It is recommended to turn off RX FC for jumbo frames
1263 for better performance */
1264 if (bp->dev->mtu > 5000)
1265 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1267 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1269 bnx2x_acquire_phy_lock(bp);
1271 if (load_mode == LOAD_DIAG)
1272 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
1274 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1276 bnx2x_release_phy_lock(bp);
1278 bnx2x_calc_fc_adv(bp);
1280 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1281 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1282 bnx2x_link_report(bp);
1287 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1291 void bnx2x_link_set(struct bnx2x *bp)
1293 if (!BP_NOMCP(bp)) {
1294 bnx2x_acquire_phy_lock(bp);
1295 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1296 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1297 bnx2x_release_phy_lock(bp);
1299 bnx2x_calc_fc_adv(bp);
1301 BNX2X_ERR("Bootcode is missing - can not set link\n");
1304 static void bnx2x__link_reset(struct bnx2x *bp)
1306 if (!BP_NOMCP(bp)) {
1307 bnx2x_acquire_phy_lock(bp);
1308 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1309 bnx2x_release_phy_lock(bp);
1311 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1314 u8 bnx2x_link_test(struct bnx2x *bp)
1318 if (!BP_NOMCP(bp)) {
1319 bnx2x_acquire_phy_lock(bp);
1320 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1321 bnx2x_release_phy_lock(bp);
1323 BNX2X_ERR("Bootcode is missing - can not test link\n");
1328 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1330 u32 r_param = bp->link_vars.line_speed / 8;
1331 u32 fair_periodic_timeout_usec;
1334 memset(&(bp->cmng.rs_vars), 0,
1335 sizeof(struct rate_shaping_vars_per_port));
1336 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1338 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1339 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1341 /* this is the threshold below which no timer arming will occur
1342 1.25 coefficient is for the threshold to be a little bigger
1343 than the real time, to compensate for timer in-accuracy */
1344 bp->cmng.rs_vars.rs_threshold =
1345 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1347 /* resolution of fairness timer */
1348 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1349 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1350 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1352 /* this is the threshold below which we won't arm the timer anymore */
1353 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1355 /* we multiply by 1e3/8 to get bytes/msec.
1356 We don't want the credits to pass a credit
1357 of the t_fair*FAIR_MEM (algorithm resolution) */
1358 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1359 /* since each tick is 4 usec */
1360 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1363 /* Calculates the sum of vn_min_rates.
1364 It's needed for further normalizing of the min_rates.
1366 sum of vn_min_rates.
1368 0 - if all the min_rates are 0.
1369 In the later case fainess algorithm should be deactivated.
1370 If not all min_rates are zero then those that are zeroes will be set to 1.
1372 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1375 int port = BP_PORT(bp);
1378 bp->vn_weight_sum = 0;
1379 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1380 int func = 2*vn + port;
1381 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1382 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1383 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1385 /* Skip hidden vns */
1386 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1389 /* If min rate is zero - set it to 1 */
1391 vn_min_rate = DEF_MIN_RATE;
1395 bp->vn_weight_sum += vn_min_rate;
1398 /* ... only if all min rates are zeros - disable fairness */
1400 bp->cmng.flags.cmng_enables &=
1401 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1402 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1403 " fairness will be disabled\n");
1405 bp->cmng.flags.cmng_enables |=
1406 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1409 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1411 struct rate_shaping_vars_per_vn m_rs_vn;
1412 struct fairness_vars_per_vn m_fair_vn;
1413 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1414 u16 vn_min_rate, vn_max_rate;
1417 /* If function is hidden - set min and max to zeroes */
1418 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1423 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1424 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1425 /* If min rate is zero - set it to 1 */
1427 vn_min_rate = DEF_MIN_RATE;
1428 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1429 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1432 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1433 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1435 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1436 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1438 /* global vn counter - maximal Mbps for this vn */
1439 m_rs_vn.vn_counter.rate = vn_max_rate;
1441 /* quota - number of bytes transmitted in this period */
1442 m_rs_vn.vn_counter.quota =
1443 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1445 if (bp->vn_weight_sum) {
1446 /* credit for each period of the fairness algorithm:
1447 number of bytes in T_FAIR (the vn share the port rate).
1448 vn_weight_sum should not be larger than 10000, thus
1449 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1451 m_fair_vn.vn_credit_delta =
1452 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1453 (8 * bp->vn_weight_sum))),
1454 (bp->cmng.fair_vars.fair_threshold * 2));
1455 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1456 m_fair_vn.vn_credit_delta);
1459 /* Store it to internal memory */
1460 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1461 REG_WR(bp, BAR_XSTRORM_INTMEM +
1462 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1463 ((u32 *)(&m_rs_vn))[i]);
1465 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1466 REG_WR(bp, BAR_XSTRORM_INTMEM +
1467 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1468 ((u32 *)(&m_fair_vn))[i]);
1472 /* This function is called upon link interrupt */
1473 static void bnx2x_link_attn(struct bnx2x *bp)
1475 u32 prev_link_status = bp->link_vars.link_status;
1476 /* Make sure that we are synced with the current statistics */
1477 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1479 bnx2x_link_update(&bp->link_params, &bp->link_vars);
1481 if (bp->link_vars.link_up) {
1483 /* dropless flow control */
1484 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1485 int port = BP_PORT(bp);
1486 u32 pause_enabled = 0;
1488 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1491 REG_WR(bp, BAR_USTRORM_INTMEM +
1492 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1496 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1497 struct host_port_stats *pstats;
1499 pstats = bnx2x_sp(bp, port_stats);
1500 /* reset old bmac stats */
1501 memset(&(pstats->mac_stx[0]), 0,
1502 sizeof(struct mac_stx));
1504 if (bp->state == BNX2X_STATE_OPEN)
1505 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1508 /* indicate link status only if link status actually changed */
1509 if (prev_link_status != bp->link_vars.link_status)
1510 bnx2x_link_report(bp);
1513 int port = BP_PORT(bp);
1517 /* Set the attention towards other drivers on the same port */
1518 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1519 if (vn == BP_E1HVN(bp))
1522 func = ((vn << 1) | port);
1523 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1524 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1527 if (bp->link_vars.link_up) {
1530 /* Init rate shaping and fairness contexts */
1531 bnx2x_init_port_minmax(bp);
1533 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1534 bnx2x_init_vn_minmax(bp, 2*vn + port);
1536 /* Store it to internal memory */
1538 i < sizeof(struct cmng_struct_per_port) / 4; i++)
1539 REG_WR(bp, BAR_XSTRORM_INTMEM +
1540 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1541 ((u32 *)(&bp->cmng))[i]);
1546 void bnx2x__link_status_update(struct bnx2x *bp)
1548 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
1551 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1553 if (bp->link_vars.link_up)
1554 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1556 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1558 bnx2x_calc_vn_weight_sum(bp);
1560 /* indicate link status */
1561 bnx2x_link_report(bp);
1564 static void bnx2x_pmf_update(struct bnx2x *bp)
1566 int port = BP_PORT(bp);
1570 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1572 /* enable nig attention */
1573 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1574 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1575 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1577 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1585 * General service functions
1588 /* send the MCP a request, block until there is a reply */
1589 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
1591 int func = BP_FUNC(bp);
1592 u32 seq = ++bp->fw_seq;
1595 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1597 mutex_lock(&bp->fw_mb_mutex);
1598 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
1599 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1602 /* let the FW do it's magic ... */
1605 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
1607 /* Give the FW up to 5 second (500*10ms) */
1608 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
1610 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
1611 cnt*delay, rc, seq);
1613 /* is this a reply to our command? */
1614 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
1615 rc &= FW_MSG_CODE_MASK;
1618 BNX2X_ERR("FW failed to respond!\n");
1622 mutex_unlock(&bp->fw_mb_mutex);
1627 static void bnx2x_e1h_disable(struct bnx2x *bp)
1629 int port = BP_PORT(bp);
1631 netif_tx_disable(bp->dev);
1633 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1635 netif_carrier_off(bp->dev);
1638 static void bnx2x_e1h_enable(struct bnx2x *bp)
1640 int port = BP_PORT(bp);
1642 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
1644 /* Tx queue should be only reenabled */
1645 netif_tx_wake_all_queues(bp->dev);
1648 * Should not call netif_carrier_on since it will be called if the link
1649 * is up when checking for link state
1653 static void bnx2x_update_min_max(struct bnx2x *bp)
1655 int port = BP_PORT(bp);
1658 /* Init rate shaping and fairness contexts */
1659 bnx2x_init_port_minmax(bp);
1661 bnx2x_calc_vn_weight_sum(bp);
1663 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1664 bnx2x_init_vn_minmax(bp, 2*vn + port);
1669 /* Set the attention towards other drivers on the same port */
1670 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1671 if (vn == BP_E1HVN(bp))
1674 func = ((vn << 1) | port);
1675 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1676 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1679 /* Store it to internal memory */
1680 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1681 REG_WR(bp, BAR_XSTRORM_INTMEM +
1682 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1683 ((u32 *)(&bp->cmng))[i]);
1687 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1689 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
1691 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
1694 * This is the only place besides the function initialization
1695 * where the bp->flags can change so it is done without any
1698 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1699 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
1700 bp->flags |= MF_FUNC_DIS;
1702 bnx2x_e1h_disable(bp);
1704 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
1705 bp->flags &= ~MF_FUNC_DIS;
1707 bnx2x_e1h_enable(bp);
1709 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1711 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1713 bnx2x_update_min_max(bp);
1714 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1717 /* Report results to MCP */
1719 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
1721 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
1724 /* must be called under the spq lock */
1725 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1727 struct eth_spe *next_spe = bp->spq_prod_bd;
1729 if (bp->spq_prod_bd == bp->spq_last_bd) {
1730 bp->spq_prod_bd = bp->spq;
1731 bp->spq_prod_idx = 0;
1732 DP(NETIF_MSG_TIMER, "end of spq\n");
1740 /* must be called under the spq lock */
1741 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1743 int func = BP_FUNC(bp);
1745 /* Make sure that BD data is updated before writing the producer */
1748 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1753 /* the slow path queue is odd since completions arrive on the fastpath ring */
1754 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1755 u32 data_hi, u32 data_lo, int common)
1757 struct eth_spe *spe;
1759 #ifdef BNX2X_STOP_ON_ERROR
1760 if (unlikely(bp->panic))
1764 spin_lock_bh(&bp->spq_lock);
1766 if (!bp->spq_left) {
1767 BNX2X_ERR("BUG! SPQ ring full!\n");
1768 spin_unlock_bh(&bp->spq_lock);
1773 spe = bnx2x_sp_get_next(bp);
1775 /* CID needs port number to be encoded int it */
1776 spe->hdr.conn_and_cmd_data =
1777 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1779 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
1782 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1784 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1785 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1789 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1790 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
1791 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1792 (u32)(U64_LO(bp->spq_mapping) +
1793 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1794 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1796 bnx2x_sp_prod_update(bp);
1797 spin_unlock_bh(&bp->spq_lock);
1801 /* acquire split MCP access lock register */
1802 static int bnx2x_acquire_alr(struct bnx2x *bp)
1808 for (j = 0; j < 1000; j++) {
1810 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1811 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1812 if (val & (1L << 31))
1817 if (!(val & (1L << 31))) {
1818 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1825 /* release split MCP access lock register */
1826 static void bnx2x_release_alr(struct bnx2x *bp)
1828 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
1831 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1833 struct host_def_status_block *def_sb = bp->def_status_blk;
1836 barrier(); /* status block is written to by the chip */
1837 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1838 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1841 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1842 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1845 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1846 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1849 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1850 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1853 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1854 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1861 * slow path service functions
1864 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1866 int port = BP_PORT(bp);
1867 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1868 COMMAND_REG_ATTN_BITS_SET);
1869 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1870 MISC_REG_AEU_MASK_ATTN_FUNC_0;
1871 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1872 NIG_REG_MASK_INTERRUPT_PORT0;
1876 if (bp->attn_state & asserted)
1877 BNX2X_ERR("IGU ERROR\n");
1879 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1880 aeu_mask = REG_RD(bp, aeu_addr);
1882 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
1883 aeu_mask, asserted);
1884 aeu_mask &= ~(asserted & 0x3ff);
1885 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
1887 REG_WR(bp, aeu_addr, aeu_mask);
1888 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1890 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
1891 bp->attn_state |= asserted;
1892 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
1894 if (asserted & ATTN_HARD_WIRED_MASK) {
1895 if (asserted & ATTN_NIG_FOR_FUNC) {
1897 bnx2x_acquire_phy_lock(bp);
1899 /* save nig interrupt mask */
1900 nig_mask = REG_RD(bp, nig_int_mask_addr);
1901 REG_WR(bp, nig_int_mask_addr, 0);
1903 bnx2x_link_attn(bp);
1905 /* handle unicore attn? */
1907 if (asserted & ATTN_SW_TIMER_4_FUNC)
1908 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1910 if (asserted & GPIO_2_FUNC)
1911 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1913 if (asserted & GPIO_3_FUNC)
1914 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1916 if (asserted & GPIO_4_FUNC)
1917 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
1920 if (asserted & ATTN_GENERAL_ATTN_1) {
1921 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1922 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1924 if (asserted & ATTN_GENERAL_ATTN_2) {
1925 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1926 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1928 if (asserted & ATTN_GENERAL_ATTN_3) {
1929 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1930 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1933 if (asserted & ATTN_GENERAL_ATTN_4) {
1934 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1935 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1937 if (asserted & ATTN_GENERAL_ATTN_5) {
1938 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1939 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1941 if (asserted & ATTN_GENERAL_ATTN_6) {
1942 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1943 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1947 } /* if hardwired */
1949 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1951 REG_WR(bp, hc_addr, asserted);
1953 /* now set back the mask */
1954 if (asserted & ATTN_NIG_FOR_FUNC) {
1955 REG_WR(bp, nig_int_mask_addr, nig_mask);
1956 bnx2x_release_phy_lock(bp);
1960 static inline void bnx2x_fan_failure(struct bnx2x *bp)
1962 int port = BP_PORT(bp);
1964 /* mark the failure */
1965 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
1966 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
1967 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
1968 bp->link_params.ext_phy_config);
1970 /* log the failure */
1971 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
1972 " the driver to shutdown the card to prevent permanent"
1973 " damage. Please contact OEM Support for assistance\n");
1976 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1978 int port = BP_PORT(bp);
1980 u32 val, swap_val, swap_override;
1982 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1983 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
1985 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
1987 val = REG_RD(bp, reg_offset);
1988 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
1989 REG_WR(bp, reg_offset, val);
1991 BNX2X_ERR("SPIO5 hw attention\n");
1993 /* Fan failure attention */
1994 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
1995 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
1996 /* Low power mode is controlled by GPIO 2 */
1997 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1998 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1999 /* The PHY reset is controlled by GPIO 1 */
2000 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2001 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2004 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2005 /* The PHY reset is controlled by GPIO 1 */
2006 /* fake the port number to cancel the swap done in
2008 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2009 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2010 port = (swap_val && swap_override) ^ 1;
2011 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2012 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2018 bnx2x_fan_failure(bp);
2021 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2022 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2023 bnx2x_acquire_phy_lock(bp);
2024 bnx2x_handle_module_detect_int(&bp->link_params);
2025 bnx2x_release_phy_lock(bp);
2028 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2030 val = REG_RD(bp, reg_offset);
2031 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2032 REG_WR(bp, reg_offset, val);
2034 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2035 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2040 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2044 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2046 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2047 BNX2X_ERR("DB hw attention 0x%x\n", val);
2048 /* DORQ discard attention */
2050 BNX2X_ERR("FATAL error from DORQ\n");
2053 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2055 int port = BP_PORT(bp);
2058 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2059 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2061 val = REG_RD(bp, reg_offset);
2062 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2063 REG_WR(bp, reg_offset, val);
2065 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2066 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2071 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2075 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2077 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2078 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2079 /* CFC error attention */
2081 BNX2X_ERR("FATAL error from CFC\n");
2084 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2086 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2087 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2088 /* RQ_USDMDP_FIFO_OVERFLOW */
2090 BNX2X_ERR("FATAL error from PXP\n");
2093 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2095 int port = BP_PORT(bp);
2098 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2099 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2101 val = REG_RD(bp, reg_offset);
2102 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2103 REG_WR(bp, reg_offset, val);
2105 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2106 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
2111 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2115 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2117 if (attn & BNX2X_PMF_LINK_ASSERT) {
2118 int func = BP_FUNC(bp);
2120 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2121 bp->mf_config = SHMEM_RD(bp,
2122 mf_cfg.func_mf_config[func].config);
2123 val = SHMEM_RD(bp, func_mb[func].drv_status);
2124 if (val & DRV_STATUS_DCC_EVENT_MASK)
2126 (val & DRV_STATUS_DCC_EVENT_MASK));
2127 bnx2x__link_status_update(bp);
2128 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
2129 bnx2x_pmf_update(bp);
2131 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2133 BNX2X_ERR("MC assert!\n");
2134 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2135 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2136 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2137 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2140 } else if (attn & BNX2X_MCP_ASSERT) {
2142 BNX2X_ERR("MCP assert!\n");
2143 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2147 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2150 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2151 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2152 if (attn & BNX2X_GRC_TIMEOUT) {
2153 val = CHIP_IS_E1H(bp) ?
2154 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2155 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2157 if (attn & BNX2X_GRC_RSV) {
2158 val = CHIP_IS_E1H(bp) ?
2159 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2160 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2162 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2166 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
2167 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
2168 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2169 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
2170 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
2171 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
2173 * should be run under rtnl lock
2175 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2177 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2178 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2179 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2185 * should be run under rtnl lock
2187 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2189 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2191 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2197 * should be run under rtnl lock
2199 bool bnx2x_reset_is_done(struct bnx2x *bp)
2201 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2202 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2203 return (val & RESET_DONE_FLAG_MASK) ? false : true;
2207 * should be run under rtnl lock
2209 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2211 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2213 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2215 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2216 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2222 * should be run under rtnl lock
2224 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2226 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2228 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2230 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2231 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2239 * should be run under rtnl lock
2241 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2243 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2246 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2248 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2249 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2252 static inline void _print_next_block(int idx, const char *blk)
2259 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2263 for (i = 0; sig; i++) {
2264 cur_bit = ((u32)0x1 << i);
2265 if (sig & cur_bit) {
2267 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2268 _print_next_block(par_num++, "BRB");
2270 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2271 _print_next_block(par_num++, "PARSER");
2273 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2274 _print_next_block(par_num++, "TSDM");
2276 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2277 _print_next_block(par_num++, "SEARCHER");
2279 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2280 _print_next_block(par_num++, "TSEMI");
2292 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2296 for (i = 0; sig; i++) {
2297 cur_bit = ((u32)0x1 << i);
2298 if (sig & cur_bit) {
2300 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2301 _print_next_block(par_num++, "PBCLIENT");
2303 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2304 _print_next_block(par_num++, "QM");
2306 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2307 _print_next_block(par_num++, "XSDM");
2309 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2310 _print_next_block(par_num++, "XSEMI");
2312 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2313 _print_next_block(par_num++, "DOORBELLQ");
2315 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2316 _print_next_block(par_num++, "VAUX PCI CORE");
2318 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2319 _print_next_block(par_num++, "DEBUG");
2321 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2322 _print_next_block(par_num++, "USDM");
2324 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2325 _print_next_block(par_num++, "USEMI");
2327 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2328 _print_next_block(par_num++, "UPB");
2330 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2331 _print_next_block(par_num++, "CSDM");
2343 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2347 for (i = 0; sig; i++) {
2348 cur_bit = ((u32)0x1 << i);
2349 if (sig & cur_bit) {
2351 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2352 _print_next_block(par_num++, "CSEMI");
2354 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2355 _print_next_block(par_num++, "PXP");
2357 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2358 _print_next_block(par_num++,
2359 "PXPPCICLOCKCLIENT");
2361 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2362 _print_next_block(par_num++, "CFC");
2364 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2365 _print_next_block(par_num++, "CDU");
2367 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2368 _print_next_block(par_num++, "IGU");
2370 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2371 _print_next_block(par_num++, "MISC");
2383 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2387 for (i = 0; sig; i++) {
2388 cur_bit = ((u32)0x1 << i);
2389 if (sig & cur_bit) {
2391 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2392 _print_next_block(par_num++, "MCP ROM");
2394 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2395 _print_next_block(par_num++, "MCP UMP RX");
2397 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2398 _print_next_block(par_num++, "MCP UMP TX");
2400 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2401 _print_next_block(par_num++, "MCP SCPAD");
2413 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2416 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2417 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2419 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
2420 "[0]:0x%08x [1]:0x%08x "
2421 "[2]:0x%08x [3]:0x%08x\n",
2422 sig0 & HW_PRTY_ASSERT_SET_0,
2423 sig1 & HW_PRTY_ASSERT_SET_1,
2424 sig2 & HW_PRTY_ASSERT_SET_2,
2425 sig3 & HW_PRTY_ASSERT_SET_3);
2426 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
2428 par_num = bnx2x_print_blocks_with_parity0(
2429 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2430 par_num = bnx2x_print_blocks_with_parity1(
2431 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2432 par_num = bnx2x_print_blocks_with_parity2(
2433 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2434 par_num = bnx2x_print_blocks_with_parity3(
2435 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
2442 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2444 struct attn_route attn;
2445 int port = BP_PORT(bp);
2447 attn.sig[0] = REG_RD(bp,
2448 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2450 attn.sig[1] = REG_RD(bp,
2451 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2453 attn.sig[2] = REG_RD(bp,
2454 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2456 attn.sig[3] = REG_RD(bp,
2457 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2460 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
2464 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2466 struct attn_route attn, *group_mask;
2467 int port = BP_PORT(bp);
2473 /* need to take HW lock because MCP or other port might also
2474 try to handle this event */
2475 bnx2x_acquire_alr(bp);
2477 if (bnx2x_chk_parity_attn(bp)) {
2478 bp->recovery_state = BNX2X_RECOVERY_INIT;
2479 bnx2x_set_reset_in_progress(bp);
2480 schedule_delayed_work(&bp->reset_task, 0);
2481 /* Disable HW interrupts */
2482 bnx2x_int_disable(bp);
2483 bnx2x_release_alr(bp);
2484 /* In case of parity errors don't handle attentions so that
2485 * other function would "see" parity errors.
2490 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2491 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2492 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2493 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2494 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2495 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2497 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2498 if (deasserted & (1 << index)) {
2499 group_mask = &bp->attn_group[index];
2501 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2502 index, group_mask->sig[0], group_mask->sig[1],
2503 group_mask->sig[2], group_mask->sig[3]);
2505 bnx2x_attn_int_deasserted3(bp,
2506 attn.sig[3] & group_mask->sig[3]);
2507 bnx2x_attn_int_deasserted1(bp,
2508 attn.sig[1] & group_mask->sig[1]);
2509 bnx2x_attn_int_deasserted2(bp,
2510 attn.sig[2] & group_mask->sig[2]);
2511 bnx2x_attn_int_deasserted0(bp,
2512 attn.sig[0] & group_mask->sig[0]);
2516 bnx2x_release_alr(bp);
2518 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2521 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2523 REG_WR(bp, reg_addr, val);
2525 if (~bp->attn_state & deasserted)
2526 BNX2X_ERR("IGU ERROR\n");
2528 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2529 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2531 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2532 aeu_mask = REG_RD(bp, reg_addr);
2534 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2535 aeu_mask, deasserted);
2536 aeu_mask |= (deasserted & 0x3ff);
2537 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2539 REG_WR(bp, reg_addr, aeu_mask);
2540 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2542 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2543 bp->attn_state &= ~deasserted;
2544 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2547 static void bnx2x_attn_int(struct bnx2x *bp)
2549 /* read local copy of bits */
2550 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2552 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2554 u32 attn_state = bp->attn_state;
2556 /* look for changed bits */
2557 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2558 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2561 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2562 attn_bits, attn_ack, asserted, deasserted);
2564 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2565 BNX2X_ERR("BAD attention state\n");
2567 /* handle bits that were raised */
2569 bnx2x_attn_int_asserted(bp, asserted);
2572 bnx2x_attn_int_deasserted(bp, deasserted);
2575 static void bnx2x_sp_task(struct work_struct *work)
2577 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2580 /* Return here if interrupt is disabled */
2581 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2582 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2586 status = bnx2x_update_dsb_idx(bp);
2587 /* if (status == 0) */
2588 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2590 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
2598 /* CStorm events: STAT_QUERY */
2600 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
2604 if (unlikely(status))
2605 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2608 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2610 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2612 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2614 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2616 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2620 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2622 struct net_device *dev = dev_instance;
2623 struct bnx2x *bp = netdev_priv(dev);
2625 /* Return here if interrupt is disabled */
2626 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2627 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2631 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2633 #ifdef BNX2X_STOP_ON_ERROR
2634 if (unlikely(bp->panic))
2640 struct cnic_ops *c_ops;
2643 c_ops = rcu_dereference(bp->cnic_ops);
2645 c_ops->cnic_handler(bp->cnic_data, NULL);
2649 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2654 /* end of slow path */
2656 static void bnx2x_timer(unsigned long data)
2658 struct bnx2x *bp = (struct bnx2x *) data;
2660 if (!netif_running(bp->dev))
2663 if (atomic_read(&bp->intr_sem) != 0)
2667 struct bnx2x_fastpath *fp = &bp->fp[0];
2671 rc = bnx2x_rx_int(fp, 1000);
2674 if (!BP_NOMCP(bp)) {
2675 int func = BP_FUNC(bp);
2679 ++bp->fw_drv_pulse_wr_seq;
2680 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2681 /* TBD - add SYSTEM_TIME */
2682 drv_pulse = bp->fw_drv_pulse_wr_seq;
2683 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
2685 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
2686 MCP_PULSE_SEQ_MASK);
2687 /* The delta between driver pulse and mcp response
2688 * should be 1 (before mcp response) or 0 (after mcp response)
2690 if ((drv_pulse != mcp_pulse) &&
2691 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2692 /* someone lost a heartbeat... */
2693 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2694 drv_pulse, mcp_pulse);
2698 if (bp->state == BNX2X_STATE_OPEN)
2699 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
2702 mod_timer(&bp->timer, jiffies + bp->current_interval);
2705 /* end of Statistics */
2710 * nic init service functions
2713 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
2715 int port = BP_PORT(bp);
2718 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2719 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2720 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2721 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2722 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2723 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
2726 void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
2727 dma_addr_t mapping, int sb_id)
2729 int port = BP_PORT(bp);
2730 int func = BP_FUNC(bp);
2735 section = ((u64)mapping) + offsetof(struct host_status_block,
2737 sb->u_status_block.status_block_id = sb_id;
2739 REG_WR(bp, BAR_CSTRORM_INTMEM +
2740 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2741 REG_WR(bp, BAR_CSTRORM_INTMEM +
2742 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
2744 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2745 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
2747 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2748 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2749 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
2752 section = ((u64)mapping) + offsetof(struct host_status_block,
2754 sb->c_status_block.status_block_id = sb_id;
2756 REG_WR(bp, BAR_CSTRORM_INTMEM +
2757 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
2758 REG_WR(bp, BAR_CSTRORM_INTMEM +
2759 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
2761 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
2762 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
2764 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2765 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2766 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
2768 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2771 static void bnx2x_zero_def_sb(struct bnx2x *bp)
2773 int func = BP_FUNC(bp);
2775 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
2776 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2777 sizeof(struct tstorm_def_status_block)/4);
2778 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2779 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2780 sizeof(struct cstorm_def_status_block_u)/4);
2781 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2782 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2783 sizeof(struct cstorm_def_status_block_c)/4);
2784 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
2785 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2786 sizeof(struct xstorm_def_status_block)/4);
2789 static void bnx2x_init_def_sb(struct bnx2x *bp,
2790 struct host_def_status_block *def_sb,
2791 dma_addr_t mapping, int sb_id)
2793 int port = BP_PORT(bp);
2794 int func = BP_FUNC(bp);
2795 int index, val, reg_offset;
2799 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2800 atten_status_block);
2801 def_sb->atten_status_block.status_block_id = sb_id;
2805 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2806 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2808 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2809 bp->attn_group[index].sig[0] = REG_RD(bp,
2810 reg_offset + 0x10*index);
2811 bp->attn_group[index].sig[1] = REG_RD(bp,
2812 reg_offset + 0x4 + 0x10*index);
2813 bp->attn_group[index].sig[2] = REG_RD(bp,
2814 reg_offset + 0x8 + 0x10*index);
2815 bp->attn_group[index].sig[3] = REG_RD(bp,
2816 reg_offset + 0xc + 0x10*index);
2819 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2820 HC_REG_ATTN_MSG0_ADDR_L);
2822 REG_WR(bp, reg_offset, U64_LO(section));
2823 REG_WR(bp, reg_offset + 4, U64_HI(section));
2825 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2827 val = REG_RD(bp, reg_offset);
2829 REG_WR(bp, reg_offset, val);
2832 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2833 u_def_status_block);
2834 def_sb->u_def_status_block.status_block_id = sb_id;
2836 REG_WR(bp, BAR_CSTRORM_INTMEM +
2837 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2838 REG_WR(bp, BAR_CSTRORM_INTMEM +
2839 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
2841 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2842 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
2844 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2845 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2846 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
2849 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2850 c_def_status_block);
2851 def_sb->c_def_status_block.status_block_id = sb_id;
2853 REG_WR(bp, BAR_CSTRORM_INTMEM +
2854 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
2855 REG_WR(bp, BAR_CSTRORM_INTMEM +
2856 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
2858 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
2859 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
2861 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2862 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2863 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
2866 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2867 t_def_status_block);
2868 def_sb->t_def_status_block.status_block_id = sb_id;
2870 REG_WR(bp, BAR_TSTRORM_INTMEM +
2871 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2872 REG_WR(bp, BAR_TSTRORM_INTMEM +
2873 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2875 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
2876 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2878 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2879 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2880 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2883 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2884 x_def_status_block);
2885 def_sb->x_def_status_block.status_block_id = sb_id;
2887 REG_WR(bp, BAR_XSTRORM_INTMEM +
2888 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2889 REG_WR(bp, BAR_XSTRORM_INTMEM +
2890 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2892 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
2893 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2895 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2896 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2897 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2899 bp->stats_pending = 0;
2900 bp->set_mac_pending = 0;
2902 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2905 void bnx2x_update_coalesce(struct bnx2x *bp)
2907 int port = BP_PORT(bp);
2910 for_each_queue(bp, i) {
2911 int sb_id = bp->fp[i].sb_id;
2913 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2914 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2915 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2916 U_SB_ETH_RX_CQ_INDEX),
2917 bp->rx_ticks/(4 * BNX2X_BTR));
2918 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2919 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2920 U_SB_ETH_RX_CQ_INDEX),
2921 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2923 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2924 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2925 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2926 C_SB_ETH_TX_CQ_INDEX),
2927 bp->tx_ticks/(4 * BNX2X_BTR));
2928 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2929 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2930 C_SB_ETH_TX_CQ_INDEX),
2931 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2935 static void bnx2x_init_sp_ring(struct bnx2x *bp)
2937 int func = BP_FUNC(bp);
2939 spin_lock_init(&bp->spq_lock);
2941 bp->spq_left = MAX_SPQ_PENDING;
2942 bp->spq_prod_idx = 0;
2943 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2944 bp->spq_prod_bd = bp->spq;
2945 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
2947 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
2948 U64_LO(bp->spq_mapping));
2950 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
2951 U64_HI(bp->spq_mapping));
2953 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
2957 static void bnx2x_init_context(struct bnx2x *bp)
2962 for_each_queue(bp, i) {
2963 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
2964 struct bnx2x_fastpath *fp = &bp->fp[i];
2965 u8 cl_id = fp->cl_id;
2967 context->ustorm_st_context.common.sb_index_numbers =
2968 BNX2X_RX_SB_INDEX_NUM;
2969 context->ustorm_st_context.common.clientId = cl_id;
2970 context->ustorm_st_context.common.status_block_id = fp->sb_id;
2971 context->ustorm_st_context.common.flags =
2972 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2973 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
2974 context->ustorm_st_context.common.statistics_counter_id =
2976 context->ustorm_st_context.common.mc_alignment_log_size =
2977 BNX2X_RX_ALIGN_SHIFT;
2978 context->ustorm_st_context.common.bd_buff_size =
2980 context->ustorm_st_context.common.bd_page_base_hi =
2981 U64_HI(fp->rx_desc_mapping);
2982 context->ustorm_st_context.common.bd_page_base_lo =
2983 U64_LO(fp->rx_desc_mapping);
2984 if (!fp->disable_tpa) {
2985 context->ustorm_st_context.common.flags |=
2986 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
2987 context->ustorm_st_context.common.sge_buff_size =
2988 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
2990 context->ustorm_st_context.common.sge_page_base_hi =
2991 U64_HI(fp->rx_sge_mapping);
2992 context->ustorm_st_context.common.sge_page_base_lo =
2993 U64_LO(fp->rx_sge_mapping);
2995 context->ustorm_st_context.common.max_sges_for_packet =
2996 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
2997 context->ustorm_st_context.common.max_sges_for_packet =
2998 ((context->ustorm_st_context.common.
2999 max_sges_for_packet + PAGES_PER_SGE - 1) &
3000 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
3003 context->ustorm_ag_context.cdu_usage =
3004 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3005 CDU_REGION_NUMBER_UCM_AG,
3006 ETH_CONNECTION_TYPE);
3008 context->xstorm_ag_context.cdu_reserved =
3009 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3010 CDU_REGION_NUMBER_XCM_AG,
3011 ETH_CONNECTION_TYPE);
3015 for_each_queue(bp, i) {
3016 struct bnx2x_fastpath *fp = &bp->fp[i];
3017 struct eth_context *context =
3018 bnx2x_sp(bp, context[i].eth);
3020 context->cstorm_st_context.sb_index_number =
3021 C_SB_ETH_TX_CQ_INDEX;
3022 context->cstorm_st_context.status_block_id = fp->sb_id;
3024 context->xstorm_st_context.tx_bd_page_base_hi =
3025 U64_HI(fp->tx_desc_mapping);
3026 context->xstorm_st_context.tx_bd_page_base_lo =
3027 U64_LO(fp->tx_desc_mapping);
3028 context->xstorm_st_context.statistics_data = (fp->cl_id |
3029 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3033 static void bnx2x_init_ind_table(struct bnx2x *bp)
3035 int func = BP_FUNC(bp);
3038 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
3042 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
3043 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3044 REG_WR8(bp, BAR_TSTRORM_INTMEM +
3045 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
3046 bp->fp->cl_id + (i % bp->num_queues));
3049 void bnx2x_set_client_config(struct bnx2x *bp)
3051 struct tstorm_eth_client_config tstorm_client = {0};
3052 int port = BP_PORT(bp);
3055 tstorm_client.mtu = bp->dev->mtu;
3056 tstorm_client.config_flags =
3057 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3058 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
3060 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
3061 tstorm_client.config_flags |=
3062 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
3063 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3067 for_each_queue(bp, i) {
3068 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3070 REG_WR(bp, BAR_TSTRORM_INTMEM +
3071 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
3072 ((u32 *)&tstorm_client)[0]);
3073 REG_WR(bp, BAR_TSTRORM_INTMEM +
3074 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
3075 ((u32 *)&tstorm_client)[1]);
3078 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3079 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
3082 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3084 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3085 int mode = bp->rx_mode;
3086 int mask = bp->rx_mode_cl_mask;
3087 int func = BP_FUNC(bp);
3088 int port = BP_PORT(bp);
3090 /* All but management unicast packets should pass to the host as well */
3092 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3093 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3094 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3095 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
3097 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
3100 case BNX2X_RX_MODE_NONE: /* no Rx */
3101 tstorm_mac_filter.ucast_drop_all = mask;
3102 tstorm_mac_filter.mcast_drop_all = mask;
3103 tstorm_mac_filter.bcast_drop_all = mask;
3106 case BNX2X_RX_MODE_NORMAL:
3107 tstorm_mac_filter.bcast_accept_all = mask;
3110 case BNX2X_RX_MODE_ALLMULTI:
3111 tstorm_mac_filter.mcast_accept_all = mask;
3112 tstorm_mac_filter.bcast_accept_all = mask;
3115 case BNX2X_RX_MODE_PROMISC:
3116 tstorm_mac_filter.ucast_accept_all = mask;
3117 tstorm_mac_filter.mcast_accept_all = mask;
3118 tstorm_mac_filter.bcast_accept_all = mask;
3119 /* pass management unicast packets as well */
3120 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
3124 BNX2X_ERR("BAD rx mode (%d)\n", mode);
3129 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
3132 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3133 REG_WR(bp, BAR_TSTRORM_INTMEM +
3134 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
3135 ((u32 *)&tstorm_mac_filter)[i]);
3137 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
3138 ((u32 *)&tstorm_mac_filter)[i]); */
3141 if (mode != BNX2X_RX_MODE_NONE)
3142 bnx2x_set_client_config(bp);
3145 static void bnx2x_init_internal_common(struct bnx2x *bp)
3149 /* Zero this manually as its initialization is
3150 currently missing in the initTool */
3151 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3152 REG_WR(bp, BAR_USTRORM_INTMEM +
3153 USTORM_AGG_DATA_OFFSET + i * 4, 0);
3156 static void bnx2x_init_internal_port(struct bnx2x *bp)
3158 int port = BP_PORT(bp);
3161 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3163 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
3164 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3165 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3168 static void bnx2x_init_internal_func(struct bnx2x *bp)
3170 struct tstorm_eth_function_common_config tstorm_config = {0};
3171 struct stats_indication_flags stats_flags = {0};
3172 int port = BP_PORT(bp);
3173 int func = BP_FUNC(bp);
3178 tstorm_config.config_flags = RSS_FLAGS(bp);
3181 tstorm_config.rss_result_mask = MULTI_MASK;
3183 /* Enable TPA if needed */
3184 if (bp->flags & TPA_ENABLE_FLAG)
3185 tstorm_config.config_flags |=
3186 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3189 tstorm_config.config_flags |=
3190 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
3192 tstorm_config.leading_client_id = BP_L_ID(bp);
3194 REG_WR(bp, BAR_TSTRORM_INTMEM +
3195 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
3196 (*(u32 *)&tstorm_config));
3198 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3199 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
3200 bnx2x_set_storm_rx_mode(bp);
3202 for_each_queue(bp, i) {
3203 u8 cl_id = bp->fp[i].cl_id;
3205 /* reset xstorm per client statistics */
3206 offset = BAR_XSTRORM_INTMEM +
3207 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3209 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3210 REG_WR(bp, offset + j*4, 0);
3212 /* reset tstorm per client statistics */
3213 offset = BAR_TSTRORM_INTMEM +
3214 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3216 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3217 REG_WR(bp, offset + j*4, 0);
3219 /* reset ustorm per client statistics */
3220 offset = BAR_USTRORM_INTMEM +
3221 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3223 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3224 REG_WR(bp, offset + j*4, 0);
3227 /* Init statistics related context */
3228 stats_flags.collect_eth = 1;
3230 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
3231 ((u32 *)&stats_flags)[0]);
3232 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
3233 ((u32 *)&stats_flags)[1]);
3235 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
3236 ((u32 *)&stats_flags)[0]);
3237 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
3238 ((u32 *)&stats_flags)[1]);
3240 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3241 ((u32 *)&stats_flags)[0]);
3242 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3243 ((u32 *)&stats_flags)[1]);
3245 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
3246 ((u32 *)&stats_flags)[0]);
3247 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
3248 ((u32 *)&stats_flags)[1]);
3250 REG_WR(bp, BAR_XSTRORM_INTMEM +
3251 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3252 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3253 REG_WR(bp, BAR_XSTRORM_INTMEM +
3254 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3255 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3257 REG_WR(bp, BAR_TSTRORM_INTMEM +
3258 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3259 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3260 REG_WR(bp, BAR_TSTRORM_INTMEM +
3261 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3262 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3264 REG_WR(bp, BAR_USTRORM_INTMEM +
3265 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3266 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3267 REG_WR(bp, BAR_USTRORM_INTMEM +
3268 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3269 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3271 if (CHIP_IS_E1H(bp)) {
3272 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3274 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3276 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3278 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3281 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3285 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
3286 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
3287 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
3288 for_each_queue(bp, i) {
3289 struct bnx2x_fastpath *fp = &bp->fp[i];
3291 REG_WR(bp, BAR_USTRORM_INTMEM +
3292 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
3293 U64_LO(fp->rx_comp_mapping));
3294 REG_WR(bp, BAR_USTRORM_INTMEM +
3295 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
3296 U64_HI(fp->rx_comp_mapping));
3299 REG_WR(bp, BAR_USTRORM_INTMEM +
3300 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3301 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3302 REG_WR(bp, BAR_USTRORM_INTMEM +
3303 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
3304 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3306 REG_WR16(bp, BAR_USTRORM_INTMEM +
3307 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
3311 /* dropless flow control */
3312 if (CHIP_IS_E1H(bp)) {
3313 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3315 rx_pause.bd_thr_low = 250;
3316 rx_pause.cqe_thr_low = 250;
3318 rx_pause.sge_thr_low = 0;
3319 rx_pause.bd_thr_high = 350;
3320 rx_pause.cqe_thr_high = 350;
3321 rx_pause.sge_thr_high = 0;
3323 for_each_queue(bp, i) {
3324 struct bnx2x_fastpath *fp = &bp->fp[i];
3326 if (!fp->disable_tpa) {
3327 rx_pause.sge_thr_low = 150;
3328 rx_pause.sge_thr_high = 250;
3332 offset = BAR_USTRORM_INTMEM +
3333 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3336 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3338 REG_WR(bp, offset + j*4,
3339 ((u32 *)&rx_pause)[j]);
3343 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3345 /* Init rate shaping and fairness contexts */
3349 /* During init there is no active link
3350 Until link is up, set link rate to 10Gbps */
3351 bp->link_vars.line_speed = SPEED_10000;
3352 bnx2x_init_port_minmax(bp);
3356 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
3357 bnx2x_calc_vn_weight_sum(bp);
3359 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3360 bnx2x_init_vn_minmax(bp, 2*vn + port);
3362 /* Enable rate shaping and fairness */
3363 bp->cmng.flags.cmng_enables |=
3364 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
3367 /* rate shaping and fairness are disabled */
3369 "single function mode minmax will be disabled\n");
3373 /* Store cmng structures to internal memory */
3375 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3376 REG_WR(bp, BAR_XSTRORM_INTMEM +
3377 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3378 ((u32 *)(&bp->cmng))[i]);
3381 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3383 switch (load_code) {
3384 case FW_MSG_CODE_DRV_LOAD_COMMON:
3385 bnx2x_init_internal_common(bp);
3388 case FW_MSG_CODE_DRV_LOAD_PORT:
3389 bnx2x_init_internal_port(bp);
3392 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3393 bnx2x_init_internal_func(bp);
3397 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3402 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
3406 for_each_queue(bp, i) {
3407 struct bnx2x_fastpath *fp = &bp->fp[i];
3410 fp->state = BNX2X_FP_STATE_CLOSED;
3412 fp->cl_id = BP_L_ID(bp) + i;
3414 fp->sb_id = fp->cl_id + 1;
3416 fp->sb_id = fp->cl_id;
3419 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
3420 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
3421 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
3423 bnx2x_update_fpsb_idx(fp);
3426 /* ensure status block indices were read */
3430 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3432 bnx2x_update_dsb_idx(bp);
3433 bnx2x_update_coalesce(bp);
3434 bnx2x_init_rx_rings(bp);
3435 bnx2x_init_tx_ring(bp);
3436 bnx2x_init_sp_ring(bp);
3437 bnx2x_init_context(bp);
3438 bnx2x_init_internal(bp, load_code);
3439 bnx2x_init_ind_table(bp);
3440 bnx2x_stats_init(bp);
3442 /* At this point, we are ready for interrupts */
3443 atomic_set(&bp->intr_sem, 0);
3445 /* flush all before enabling interrupts */
3449 bnx2x_int_enable(bp);
3451 /* Check for SPIO5 */
3452 bnx2x_attn_int_deasserted0(bp,
3453 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3454 AEU_INPUTS_ATTN_BITS_SPIO5);
3457 /* end of nic init */
3460 * gzip service functions
3463 static int bnx2x_gunzip_init(struct bnx2x *bp)
3465 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3466 &bp->gunzip_mapping, GFP_KERNEL);
3467 if (bp->gunzip_buf == NULL)
3470 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3471 if (bp->strm == NULL)
3474 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3476 if (bp->strm->workspace == NULL)
3486 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3487 bp->gunzip_mapping);
3488 bp->gunzip_buf = NULL;
3491 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
3492 " un-compression\n");
3496 static void bnx2x_gunzip_end(struct bnx2x *bp)
3498 kfree(bp->strm->workspace);
3503 if (bp->gunzip_buf) {
3504 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3505 bp->gunzip_mapping);
3506 bp->gunzip_buf = NULL;
3510 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
3514 /* check gzip header */
3515 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3516 BNX2X_ERR("Bad gzip header\n");
3524 if (zbuf[3] & FNAME)
3525 while ((zbuf[n++] != 0) && (n < len));
3527 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
3528 bp->strm->avail_in = len - n;
3529 bp->strm->next_out = bp->gunzip_buf;
3530 bp->strm->avail_out = FW_BUF_SIZE;
3532 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3536 rc = zlib_inflate(bp->strm, Z_FINISH);
3537 if ((rc != Z_OK) && (rc != Z_STREAM_END))
3538 netdev_err(bp->dev, "Firmware decompression error: %s\n",
3541 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3542 if (bp->gunzip_outlen & 0x3)
3543 netdev_err(bp->dev, "Firmware decompression error:"
3544 " gunzip_outlen (%d) not aligned\n",
3546 bp->gunzip_outlen >>= 2;
3548 zlib_inflateEnd(bp->strm);
3550 if (rc == Z_STREAM_END)
3556 /* nic load/unload */
3559 * General service functions
3562 /* send a NIG loopback debug packet */
3563 static void bnx2x_lb_pckt(struct bnx2x *bp)
3567 /* Ethernet source and destination addresses */
3568 wb_write[0] = 0x55555555;
3569 wb_write[1] = 0x55555555;
3570 wb_write[2] = 0x20; /* SOP */
3571 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3573 /* NON-IP protocol */
3574 wb_write[0] = 0x09000000;
3575 wb_write[1] = 0x55555555;
3576 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
3577 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3580 /* some of the internal memories
3581 * are not directly readable from the driver
3582 * to test them we send debug packets
3584 static int bnx2x_int_mem_test(struct bnx2x *bp)
3590 if (CHIP_REV_IS_FPGA(bp))
3592 else if (CHIP_REV_IS_EMUL(bp))
3597 DP(NETIF_MSG_HW, "start part1\n");
3599 /* Disable inputs of parser neighbor blocks */
3600 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3601 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3602 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3603 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3605 /* Write 0 to parser credits for CFC search request */
3606 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3608 /* send Ethernet packet */
3611 /* TODO do i reset NIG statistic? */
3612 /* Wait until NIG register shows 1 packet of size 0x10 */
3613 count = 1000 * factor;
3616 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3617 val = *bnx2x_sp(bp, wb_data[0]);
3625 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3629 /* Wait until PRS register shows 1 packet */
3630 count = 1000 * factor;
3632 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3640 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3644 /* Reset and init BRB, PRS */
3645 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3647 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3649 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3650 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3652 DP(NETIF_MSG_HW, "part2\n");
3654 /* Disable inputs of parser neighbor blocks */
3655 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3656 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3657 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3658 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3660 /* Write 0 to parser credits for CFC search request */
3661 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3663 /* send 10 Ethernet packets */
3664 for (i = 0; i < 10; i++)
3667 /* Wait until NIG register shows 10 + 1
3668 packets of size 11*0x10 = 0xb0 */
3669 count = 1000 * factor;
3672 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3673 val = *bnx2x_sp(bp, wb_data[0]);
3681 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3685 /* Wait until PRS register shows 2 packets */
3686 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3688 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3690 /* Write 1 to parser credits for CFC search request */
3691 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3693 /* Wait until PRS register shows 3 packets */
3694 msleep(10 * factor);
3695 /* Wait until NIG register shows 1 packet of size 0x10 */
3696 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3698 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3700 /* clear NIG EOP FIFO */
3701 for (i = 0; i < 11; i++)
3702 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3703 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3705 BNX2X_ERR("clear of NIG failed\n");
3709 /* Reset and init BRB, PRS, NIG */
3710 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3712 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3714 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3715 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3718 REG_WR(bp, PRS_REG_NIC_MODE, 1);
3721 /* Enable inputs of parser neighbor blocks */
3722 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3723 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3724 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3725 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
3727 DP(NETIF_MSG_HW, "done\n");
3732 static void enable_blocks_attention(struct bnx2x *bp)
3734 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3735 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3736 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3737 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3738 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3739 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3740 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3741 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3742 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
3743 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3744 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
3745 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3746 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3747 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
3748 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3749 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
3750 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3751 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3752 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3753 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
3754 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3755 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3756 if (CHIP_REV_IS_FPGA(bp))
3757 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
3759 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
3760 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3761 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3762 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
3763 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3764 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
3765 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3766 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
3767 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3768 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
3771 static const struct {
3774 } bnx2x_parity_mask[] = {
3775 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3776 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3777 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3778 {HC_REG_HC_PRTY_MASK, 0xffffffff},
3779 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3780 {QM_REG_QM_PRTY_MASK, 0x0},
3781 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3782 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3783 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3784 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3785 {CDU_REG_CDU_PRTY_MASK, 0x0},
3786 {CFC_REG_CFC_PRTY_MASK, 0x0},
3787 {DBG_REG_DBG_PRTY_MASK, 0x0},
3788 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3789 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3790 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3791 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
3792 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
3793 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
3794 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
3795 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3796 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3797 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3798 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3799 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3800 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3801 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3802 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3805 static void enable_blocks_parity(struct bnx2x *bp)
3807 int i, mask_arr_len =
3808 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
3810 for (i = 0; i < mask_arr_len; i++)
3811 REG_WR(bp, bnx2x_parity_mask[i].addr,
3812 bnx2x_parity_mask[i].mask);
3816 static void bnx2x_reset_common(struct bnx2x *bp)
3819 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3821 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
3824 static void bnx2x_init_pxp(struct bnx2x *bp)
3827 int r_order, w_order;
3829 pci_read_config_word(bp->pdev,
3830 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3831 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3832 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3834 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3836 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3840 bnx2x_init_pxp_arb(bp, r_order, w_order);
3843 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3853 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3854 SHARED_HW_CFG_FAN_FAILURE_MASK;
3856 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3860 * The fan failure mechanism is usually related to the PHY type since
3861 * the power consumption of the board is affected by the PHY. Currently,
3862 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3864 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3865 for (port = PORT_0; port < PORT_MAX; port++) {
3867 SHMEM_RD(bp, dev_info.port_hw_config[port].
3868 external_phy_config) &
3869 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3872 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
3874 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
3876 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
3879 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3881 if (is_required == 0)
3884 /* Fan failure is indicated by SPIO 5 */
3885 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3886 MISC_REGISTERS_SPIO_INPUT_HI_Z);
3888 /* set to active low mode */
3889 val = REG_RD(bp, MISC_REG_SPIO_INT);
3890 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
3891 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
3892 REG_WR(bp, MISC_REG_SPIO_INT, val);
3894 /* enable interrupt to signal the IGU */
3895 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3896 val |= (1 << MISC_REGISTERS_SPIO_5);
3897 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3900 static int bnx2x_init_common(struct bnx2x *bp)
3907 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
3909 bnx2x_reset_common(bp);
3910 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3911 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
3913 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
3914 if (CHIP_IS_E1H(bp))
3915 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
3917 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3919 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
3921 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
3922 if (CHIP_IS_E1(bp)) {
3923 /* enable HW interrupt from PXP on USDM overflow
3924 bit 16 on INT_MASK_0 */
3925 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3928 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
3932 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3933 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3934 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3935 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3936 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
3937 /* make sure this value is 0 */
3938 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
3940 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3941 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3942 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3943 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3944 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
3947 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
3949 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3950 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3951 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3954 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3955 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
3957 /* let the HW do it's magic ... */
3959 /* finish PXP init */
3960 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3962 BNX2X_ERR("PXP2 CFG failed\n");
3965 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3967 BNX2X_ERR("PXP2 RD_INIT failed\n");
3971 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3972 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
3974 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
3976 /* clean the DMAE memory */
3978 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
3980 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
3981 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
3982 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
3983 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
3985 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3986 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3987 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3988 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3990 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
3995 for (i = 0; i < 64; i++) {
3996 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
3997 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
3999 if (CHIP_IS_E1H(bp)) {
4000 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
4001 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
4006 /* soft reset pulse */
4007 REG_WR(bp, QM_REG_SOFT_RESET, 1);
4008 REG_WR(bp, QM_REG_SOFT_RESET, 0);
4011 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
4014 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
4015 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
4016 if (!CHIP_REV_IS_SLOW(bp)) {
4017 /* enable hw interrupt from doorbell Q */
4018 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4021 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4022 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4023 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4026 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4028 if (CHIP_IS_E1H(bp))
4029 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
4031 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4032 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4033 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4034 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
4036 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4037 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4038 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4039 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4041 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4042 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4043 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4044 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
4047 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4049 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4052 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4053 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4054 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
4056 REG_WR(bp, SRC_REG_SOFT_RST, 1);
4057 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4058 REG_WR(bp, i, random32());
4059 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
4061 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4062 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4063 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4064 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4065 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4066 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4067 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4068 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4069 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4070 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4072 REG_WR(bp, SRC_REG_SOFT_RST, 0);
4074 if (sizeof(union cdu_context) != 1024)
4075 /* we currently assume that a context is 1024 bytes */
4076 dev_alert(&bp->pdev->dev, "please adjust the size "
4077 "of cdu_context(%ld)\n",
4078 (long)sizeof(union cdu_context));
4080 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
4081 val = (4 << 24) + (0 << 12) + 1024;
4082 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
4084 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
4085 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
4086 /* enable context validation interrupt from CFC */
4087 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4089 /* set the thresholds to prevent CFC/CDU race */
4090 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
4092 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4093 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
4095 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
4096 /* Reset PCIE errors for debug */
4097 REG_WR(bp, 0x2814, 0xffffffff);
4098 REG_WR(bp, 0x3820, 0xffffffff);
4100 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
4101 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
4102 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
4103 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
4105 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
4106 if (CHIP_IS_E1H(bp)) {
4107 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4108 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4111 if (CHIP_REV_IS_SLOW(bp))
4114 /* finish CFC init */
4115 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4117 BNX2X_ERR("CFC LL_INIT failed\n");
4120 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4122 BNX2X_ERR("CFC AC_INIT failed\n");
4125 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4127 BNX2X_ERR("CFC CAM_INIT failed\n");
4130 REG_WR(bp, CFC_REG_DEBUG0, 0);
4132 /* read NIG statistic
4133 to see if this is our first up since powerup */
4134 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4135 val = *bnx2x_sp(bp, wb_data[0]);
4137 /* do internal memory self test */
4138 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4139 BNX2X_ERR("internal mem self test failed\n");
4143 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
4144 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4145 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4146 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4147 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4148 bp->port.need_hw_lock = 1;
4155 bnx2x_setup_fan_failure_detection(bp);
4157 /* clear PXP2 attentions */
4158 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
4160 enable_blocks_attention(bp);
4161 if (CHIP_PARITY_SUPPORTED(bp))
4162 enable_blocks_parity(bp);
4164 if (!BP_NOMCP(bp)) {
4165 bnx2x_acquire_phy_lock(bp);
4166 bnx2x_common_init_phy(bp, bp->common.shmem_base);
4167 bnx2x_release_phy_lock(bp);
4169 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4174 static int bnx2x_init_port(struct bnx2x *bp)
4176 int port = BP_PORT(bp);
4177 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
4181 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
4183 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
4185 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
4186 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4188 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4189 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4190 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
4191 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
4194 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
4196 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
4197 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4198 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
4201 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
4203 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
4204 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4205 /* no pause for emulation and FPGA */
4210 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4211 else if (bp->dev->mtu > 4096) {
4212 if (bp->flags & ONE_PORT_FLAG)
4216 /* (24*1024 + val*4)/256 */
4217 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4220 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4221 high = low + 56; /* 14*1024/256 */
4223 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4224 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4227 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
4229 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
4230 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
4231 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
4232 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
4234 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4235 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4236 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4237 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
4239 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
4240 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
4242 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
4244 /* configure PBF to work without PAUSE mtu 9000 */
4245 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
4247 /* update threshold */
4248 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
4249 /* update init credit */
4250 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
4253 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
4255 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
4258 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
4260 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
4261 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
4263 if (CHIP_IS_E1(bp)) {
4264 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4265 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4267 bnx2x_init_block(bp, HC_BLOCK, init_stage);
4269 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
4270 /* init aeu_mask_attn_func_0/1:
4271 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4272 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4273 * bits 4-7 are used for "per vn group attention" */
4274 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4275 (IS_E1HMF(bp) ? 0xF7 : 0x7));
4277 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
4278 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
4279 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
4280 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
4281 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
4283 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
4285 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4287 if (CHIP_IS_E1H(bp)) {
4288 /* 0x2 disable e1hov, 0x1 enable */
4289 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4290 (IS_E1HMF(bp) ? 0x1 : 0x2));
4293 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4294 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4295 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4299 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
4300 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
4302 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
4303 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4305 u32 swap_val, swap_override, aeu_gpio_mask, offset;
4307 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
4308 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
4310 /* The GPIO should be swapped if the swap register is
4312 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4313 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4315 /* Select function upon port-swap configuration */
4317 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
4318 aeu_gpio_mask = (swap_val && swap_override) ?
4319 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
4320 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
4322 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
4323 aeu_gpio_mask = (swap_val && swap_override) ?
4324 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
4325 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
4327 val = REG_RD(bp, offset);
4328 /* add GPIO3 to group */
4329 val |= aeu_gpio_mask;
4330 REG_WR(bp, offset, val);
4332 bp->port.need_hw_lock = 1;
4335 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4336 bp->port.need_hw_lock = 1;
4337 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4338 /* add SPIO 5 to group 0 */
4340 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4341 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4342 val = REG_RD(bp, reg_addr);
4343 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4344 REG_WR(bp, reg_addr, val);
4347 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4348 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4349 bp->port.need_hw_lock = 1;
4355 bnx2x__link_reset(bp);
4360 #define ILT_PER_FUNC (768/2)
4361 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
4362 /* the phys address is shifted right 12 bits and has an added
4363 1=valid bit added to the 53rd bit
4364 then since this is a wide register(TM)
4365 we split it into two 32 bit writes
4367 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4368 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
4369 #define PXP_ONE_ILT(x) (((x) << 10) | x)
4370 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
4373 #define CNIC_ILT_LINES 127
4374 #define CNIC_CTX_PER_ILT 16
4376 #define CNIC_ILT_LINES 0
4379 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4383 if (CHIP_IS_E1H(bp))
4384 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4386 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4388 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4391 static int bnx2x_init_func(struct bnx2x *bp)
4393 int port = BP_PORT(bp);
4394 int func = BP_FUNC(bp);
4398 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
4400 /* set MSI reconfigure capability */
4401 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4402 val = REG_RD(bp, addr);
4403 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4404 REG_WR(bp, addr, val);
4406 i = FUNC_ILT_BASE(func);
4408 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4409 if (CHIP_IS_E1H(bp)) {
4410 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4411 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4413 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4414 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4417 i += 1 + CNIC_ILT_LINES;
4418 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4420 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4422 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4423 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4427 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4429 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4431 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4432 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4436 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4438 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4440 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4441 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4444 /* tell the searcher where the T2 table is */
4445 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
4447 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4448 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4450 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4451 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4452 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4454 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4457 if (CHIP_IS_E1H(bp)) {
4458 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4459 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4460 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4461 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4462 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4463 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4464 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4465 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4466 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4468 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4469 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4472 /* HC init per function */
4473 if (CHIP_IS_E1H(bp)) {
4474 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4476 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4477 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4479 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4481 /* Reset PCIE errors for debug */
4482 REG_WR(bp, 0x2114, 0xffffffff);
4483 REG_WR(bp, 0x2120, 0xffffffff);
4488 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4492 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4493 BP_FUNC(bp), load_code);
4496 mutex_init(&bp->dmae_mutex);
4497 rc = bnx2x_gunzip_init(bp);
4501 switch (load_code) {
4502 case FW_MSG_CODE_DRV_LOAD_COMMON:
4503 rc = bnx2x_init_common(bp);
4508 case FW_MSG_CODE_DRV_LOAD_PORT:
4510 rc = bnx2x_init_port(bp);
4515 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4517 rc = bnx2x_init_func(bp);
4523 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4527 if (!BP_NOMCP(bp)) {
4528 int func = BP_FUNC(bp);
4530 bp->fw_drv_pulse_wr_seq =
4531 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
4532 DRV_PULSE_SEQ_MASK);
4533 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4536 /* this needs to be done before gunzip end */
4537 bnx2x_zero_def_sb(bp);
4538 for_each_queue(bp, i)
4539 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4541 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4545 bnx2x_gunzip_end(bp);
4550 void bnx2x_free_mem(struct bnx2x *bp)
4553 #define BNX2X_PCI_FREE(x, y, size) \
4556 dma_free_coherent(&bp->pdev->dev, size, x, y); \
4562 #define BNX2X_FREE(x) \
4574 for_each_queue(bp, i) {
4577 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4578 bnx2x_fp(bp, i, status_blk_mapping),
4579 sizeof(struct host_status_block));
4582 for_each_queue(bp, i) {
4584 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4585 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4586 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4587 bnx2x_fp(bp, i, rx_desc_mapping),
4588 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4590 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4591 bnx2x_fp(bp, i, rx_comp_mapping),
4592 sizeof(struct eth_fast_path_rx_cqe) *
4596 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
4597 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4598 bnx2x_fp(bp, i, rx_sge_mapping),
4599 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4602 for_each_queue(bp, i) {
4604 /* fastpath tx rings: tx_buf tx_desc */
4605 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4606 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4607 bnx2x_fp(bp, i, tx_desc_mapping),
4608 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4610 /* end of fastpath */
4612 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4613 sizeof(struct host_def_status_block));
4615 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4616 sizeof(struct bnx2x_slowpath));
4619 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4620 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4621 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4622 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
4623 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4624 sizeof(struct host_status_block));
4626 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
4628 #undef BNX2X_PCI_FREE
4632 int bnx2x_alloc_mem(struct bnx2x *bp)
4635 #define BNX2X_PCI_ALLOC(x, y, size) \
4637 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
4639 goto alloc_mem_err; \
4640 memset(x, 0, size); \
4643 #define BNX2X_ALLOC(x, size) \
4645 x = vmalloc(size); \
4647 goto alloc_mem_err; \
4648 memset(x, 0, size); \
4655 for_each_queue(bp, i) {
4656 bnx2x_fp(bp, i, bp) = bp;
4659 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4660 &bnx2x_fp(bp, i, status_blk_mapping),
4661 sizeof(struct host_status_block));
4664 for_each_queue(bp, i) {
4666 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4667 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4668 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4669 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4670 &bnx2x_fp(bp, i, rx_desc_mapping),
4671 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4673 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4674 &bnx2x_fp(bp, i, rx_comp_mapping),
4675 sizeof(struct eth_fast_path_rx_cqe) *
4679 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4680 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4681 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4682 &bnx2x_fp(bp, i, rx_sge_mapping),
4683 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4686 for_each_queue(bp, i) {
4688 /* fastpath tx rings: tx_buf tx_desc */
4689 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4690 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4691 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4692 &bnx2x_fp(bp, i, tx_desc_mapping),
4693 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4695 /* end of fastpath */
4697 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4698 sizeof(struct host_def_status_block));
4700 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4701 sizeof(struct bnx2x_slowpath));
4704 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4706 /* allocate searcher T2 table
4707 we allocate 1/4 of alloc num for T2
4708 (which is not entered into the ILT) */
4709 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4711 /* Initialize T2 (for 1024 connections) */
4712 for (i = 0; i < 16*1024; i += 64)
4713 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
4715 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
4716 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
4718 /* QM queues (128*MAX_CONN) */
4719 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4721 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4722 sizeof(struct host_status_block));
4725 /* Slow path ring */
4726 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4734 #undef BNX2X_PCI_ALLOC
4740 * Init service functions
4744 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4746 * @param bp driver descriptor
4747 * @param set set or clear an entry (1 or 0)
4748 * @param mac pointer to a buffer containing a MAC
4749 * @param cl_bit_vec bit vector of clients to register a MAC for
4750 * @param cam_offset offset in a CAM to use
4751 * @param with_bcast set broadcast MAC as well
4753 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4754 u32 cl_bit_vec, u8 cam_offset,
4757 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
4758 int port = BP_PORT(bp);
4761 * unicasts 0-31:port0 32-63:port1
4762 * multicast 64-127:port0 128-191:port1
4764 config->hdr.length = 1 + (with_bcast ? 1 : 0);
4765 config->hdr.offset = cam_offset;
4766 config->hdr.client_id = 0xff;
4767 config->hdr.reserved1 = 0;
4770 config->config_table[0].cam_entry.msb_mac_addr =
4771 swab16(*(u16 *)&mac[0]);
4772 config->config_table[0].cam_entry.middle_mac_addr =
4773 swab16(*(u16 *)&mac[2]);
4774 config->config_table[0].cam_entry.lsb_mac_addr =
4775 swab16(*(u16 *)&mac[4]);
4776 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
4778 config->config_table[0].target_table_entry.flags = 0;
4780 CAM_INVALIDATE(config->config_table[0]);
4781 config->config_table[0].target_table_entry.clients_bit_vector =
4782 cpu_to_le32(cl_bit_vec);
4783 config->config_table[0].target_table_entry.vlan_id = 0;
4785 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4786 (set ? "setting" : "clearing"),
4787 config->config_table[0].cam_entry.msb_mac_addr,
4788 config->config_table[0].cam_entry.middle_mac_addr,
4789 config->config_table[0].cam_entry.lsb_mac_addr);
4793 config->config_table[1].cam_entry.msb_mac_addr =
4794 cpu_to_le16(0xffff);
4795 config->config_table[1].cam_entry.middle_mac_addr =
4796 cpu_to_le16(0xffff);
4797 config->config_table[1].cam_entry.lsb_mac_addr =
4798 cpu_to_le16(0xffff);
4799 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4801 config->config_table[1].target_table_entry.flags =
4802 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4804 CAM_INVALIDATE(config->config_table[1]);
4805 config->config_table[1].target_table_entry.clients_bit_vector =
4806 cpu_to_le32(cl_bit_vec);
4807 config->config_table[1].target_table_entry.vlan_id = 0;
4810 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4811 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4812 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4816 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4818 * @param bp driver descriptor
4819 * @param set set or clear an entry (1 or 0)
4820 * @param mac pointer to a buffer containing a MAC
4821 * @param cl_bit_vec bit vector of clients to register a MAC for
4822 * @param cam_offset offset in a CAM to use
4824 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4825 u32 cl_bit_vec, u8 cam_offset)
4827 struct mac_configuration_cmd_e1h *config =
4828 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4830 config->hdr.length = 1;
4831 config->hdr.offset = cam_offset;
4832 config->hdr.client_id = 0xff;
4833 config->hdr.reserved1 = 0;
4836 config->config_table[0].msb_mac_addr =
4837 swab16(*(u16 *)&mac[0]);
4838 config->config_table[0].middle_mac_addr =
4839 swab16(*(u16 *)&mac[2]);
4840 config->config_table[0].lsb_mac_addr =
4841 swab16(*(u16 *)&mac[4]);
4842 config->config_table[0].clients_bit_vector =
4843 cpu_to_le32(cl_bit_vec);
4844 config->config_table[0].vlan_id = 0;
4845 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
4847 config->config_table[0].flags = BP_PORT(bp);
4849 config->config_table[0].flags =
4850 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
4852 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
4853 (set ? "setting" : "clearing"),
4854 config->config_table[0].msb_mac_addr,
4855 config->config_table[0].middle_mac_addr,
4856 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
4858 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4859 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4860 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4863 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4864 int *state_p, int poll)
4866 /* can take a while if any port is running */
4869 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4870 poll ? "polling" : "waiting", state, idx);
4875 bnx2x_rx_int(bp->fp, 10);
4876 /* if index is different from 0
4877 * the reply for some commands will
4878 * be on the non default queue
4881 bnx2x_rx_int(&bp->fp[idx], 10);
4884 mb(); /* state is changed by bnx2x_sp_event() */
4885 if (*state_p == state) {
4886 #ifdef BNX2X_STOP_ON_ERROR
4887 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
4899 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4900 poll ? "polling" : "waiting", state, idx);
4901 #ifdef BNX2X_STOP_ON_ERROR
4908 void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
4910 bp->set_mac_pending++;
4913 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4914 (1 << bp->fp->cl_id), BP_FUNC(bp));
4916 /* Wait for a completion */
4917 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4920 void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
4922 bp->set_mac_pending++;
4925 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4926 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4929 /* Wait for a completion */
4930 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4935 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
4936 * MAC(s). This function will wait until the ramdord completion
4939 * @param bp driver handle
4940 * @param set set or clear the CAM entry
4942 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
4944 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
4946 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4948 bp->set_mac_pending++;
4951 /* Send a SET_MAC ramrod */
4953 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4954 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4957 /* CAM allocation for E1H
4958 * unicasts: by func number
4959 * multicast: 20+FUNC*20, 20 each
4961 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4962 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4964 /* Wait for a completion when setting */
4965 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4971 int bnx2x_setup_leading(struct bnx2x *bp)
4975 /* reset IGU state */
4976 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4979 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4981 /* Wait for completion */
4982 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
4987 int bnx2x_setup_multi(struct bnx2x *bp, int index)
4989 struct bnx2x_fastpath *fp = &bp->fp[index];
4991 /* reset IGU state */
4992 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4995 fp->state = BNX2X_FP_STATE_OPENING;
4996 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4999 /* Wait for completion */
5000 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
5005 void bnx2x_set_num_queues_msix(struct bnx2x *bp)
5008 switch (bp->multi_mode) {
5009 case ETH_RSS_MODE_DISABLED:
5013 case ETH_RSS_MODE_REGULAR:
5015 bp->num_queues = min_t(u32, num_queues,
5016 BNX2X_MAX_QUEUES(bp));
5018 bp->num_queues = min_t(u32, num_online_cpus(),
5019 BNX2X_MAX_QUEUES(bp));
5031 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5033 struct bnx2x_fastpath *fp = &bp->fp[index];
5036 /* halt the connection */
5037 fp->state = BNX2X_FP_STATE_HALTING;
5038 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
5040 /* Wait for completion */
5041 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
5043 if (rc) /* timeout */
5046 /* delete cfc entry */
5047 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5049 /* Wait for completion */
5050 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
5055 static int bnx2x_stop_leading(struct bnx2x *bp)
5057 __le16 dsb_sp_prod_idx;
5058 /* if the other port is handling traffic,
5059 this can take a lot of time */
5065 /* Send HALT ramrod */
5066 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
5067 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
5069 /* Wait for completion */
5070 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
5071 &(bp->fp[0].state), 1);
5072 if (rc) /* timeout */
5075 dsb_sp_prod_idx = *bp->dsb_sp_prod;
5077 /* Send PORT_DELETE ramrod */
5078 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5080 /* Wait for completion to arrive on default status block
5081 we are going to reset the chip anyway
5082 so there is not much to do if this times out
5084 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
5086 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5087 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5088 *bp->dsb_sp_prod, dsb_sp_prod_idx);
5089 #ifdef BNX2X_STOP_ON_ERROR
5097 rmb(); /* Refresh the dsb_sp_prod */
5099 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5100 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
5105 static void bnx2x_reset_func(struct bnx2x *bp)
5107 int port = BP_PORT(bp);
5108 int func = BP_FUNC(bp);
5112 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5113 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5116 /* Disable Timer scan */
5117 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5119 * Wait for at least 10ms and up to 2 second for the timers scan to
5122 for (i = 0; i < 200; i++) {
5124 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
5129 base = FUNC_ILT_BASE(func);
5130 for (i = base; i < base + ILT_PER_FUNC; i++)
5131 bnx2x_ilt_wr(bp, i, 0);
5134 static void bnx2x_reset_port(struct bnx2x *bp)
5136 int port = BP_PORT(bp);
5139 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5141 /* Do not rcv packets to BRB */
5142 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5143 /* Do not direct rcv packets that are not for MCP to the BRB */
5144 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5145 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5148 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5151 /* Check for BRB port occupancy */
5152 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5154 DP(NETIF_MSG_IFDOWN,
5155 "BRB1 is not empty %d blocks are occupied\n", val);
5157 /* TODO: Close Doorbell port? */
5160 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5162 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5163 BP_FUNC(bp), reset_code);
5165 switch (reset_code) {
5166 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5167 bnx2x_reset_port(bp);
5168 bnx2x_reset_func(bp);
5169 bnx2x_reset_common(bp);
5172 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5173 bnx2x_reset_port(bp);
5174 bnx2x_reset_func(bp);
5177 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5178 bnx2x_reset_func(bp);
5182 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5187 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5189 int port = BP_PORT(bp);
5193 /* Wait until tx fastpath tasks complete */
5194 for_each_queue(bp, i) {
5195 struct bnx2x_fastpath *fp = &bp->fp[i];
5198 while (bnx2x_has_tx_work_unload(fp)) {
5202 BNX2X_ERR("timeout waiting for queue[%d]\n",
5204 #ifdef BNX2X_STOP_ON_ERROR
5215 /* Give HW time to discard old tx messages */
5218 if (CHIP_IS_E1(bp)) {
5219 struct mac_configuration_cmd *config =
5220 bnx2x_sp(bp, mcast_config);
5222 bnx2x_set_eth_mac_addr_e1(bp, 0);
5224 for (i = 0; i < config->hdr.length; i++)
5225 CAM_INVALIDATE(config->config_table[i]);
5227 config->hdr.length = i;
5228 if (CHIP_REV_IS_SLOW(bp))
5229 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5231 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
5232 config->hdr.client_id = bp->fp->cl_id;
5233 config->hdr.reserved1 = 0;
5235 bp->set_mac_pending++;
5238 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5239 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5240 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5243 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5245 bnx2x_set_eth_mac_addr_e1h(bp, 0);
5247 for (i = 0; i < MC_HASH_SIZE; i++)
5248 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5250 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
5253 /* Clear iSCSI L2 MAC */
5254 mutex_lock(&bp->cnic_mutex);
5255 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5256 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5257 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5259 mutex_unlock(&bp->cnic_mutex);
5262 if (unload_mode == UNLOAD_NORMAL)
5263 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5265 else if (bp->flags & NO_WOL_FLAG)
5266 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
5269 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5270 u8 *mac_addr = bp->dev->dev_addr;
5272 /* The mac address is written to entries 1-4 to
5273 preserve entry 0 which is used by the PMF */
5274 u8 entry = (BP_E1HVN(bp) + 1)*8;
5276 val = (mac_addr[0] << 8) | mac_addr[1];
5277 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5279 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5280 (mac_addr[4] << 8) | mac_addr[5];
5281 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5283 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5286 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5288 /* Close multi and leading connections
5289 Completions for ramrods are collected in a synchronous way */
5290 for_each_nondefault_queue(bp, i)
5291 if (bnx2x_stop_multi(bp, i))
5294 rc = bnx2x_stop_leading(bp);
5296 BNX2X_ERR("Stop leading failed!\n");
5297 #ifdef BNX2X_STOP_ON_ERROR
5306 reset_code = bnx2x_fw_command(bp, reset_code);
5308 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
5309 load_count[0], load_count[1], load_count[2]);
5311 load_count[1 + port]--;
5312 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
5313 load_count[0], load_count[1], load_count[2]);
5314 if (load_count[0] == 0)
5315 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
5316 else if (load_count[1 + port] == 0)
5317 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5319 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5322 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5323 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5324 bnx2x__link_reset(bp);
5326 /* Reset the chip */
5327 bnx2x_reset_chip(bp, reset_code);
5329 /* Report UNLOAD_DONE to MCP */
5331 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5335 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
5339 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5341 if (CHIP_IS_E1(bp)) {
5342 int port = BP_PORT(bp);
5343 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5344 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5346 val = REG_RD(bp, addr);
5348 REG_WR(bp, addr, val);
5349 } else if (CHIP_IS_E1H(bp)) {
5350 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5351 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5352 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5353 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5358 /* Close gates #2, #3 and #4: */
5359 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5363 /* Gates #2 and #4a are closed/opened for "not E1" only */
5364 if (!CHIP_IS_E1(bp)) {
5366 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5367 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5368 close ? (val | 0x1) : (val & (~(u32)1)));
5370 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5371 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5372 close ? (val | 0x1) : (val & (~(u32)1)));
5376 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5377 val = REG_RD(bp, addr);
5378 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
5380 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5381 close ? "closing" : "opening");
5385 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
5387 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5389 /* Do some magic... */
5390 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5391 *magic_val = val & SHARED_MF_CLP_MAGIC;
5392 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5395 /* Restore the value of the `magic' bit.
5397 * @param pdev Device handle.
5398 * @param magic_val Old value of the `magic' bit.
5400 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5402 /* Restore the `magic' bit value... */
5403 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
5404 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
5405 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
5406 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5407 MF_CFG_WR(bp, shared_mf_config.clp_mb,
5408 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5411 /* Prepares for MCP reset: takes care of CLP configurations.
5414 * @param magic_val Old value of 'magic' bit.
5416 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5419 u32 validity_offset;
5421 DP(NETIF_MSG_HW, "Starting\n");
5423 /* Set `magic' bit in order to save MF config */
5424 if (!CHIP_IS_E1(bp))
5425 bnx2x_clp_reset_prep(bp, magic_val);
5427 /* Get shmem offset */
5428 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5429 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5431 /* Clear validity map flags */
5433 REG_WR(bp, shmem + validity_offset, 0);
5436 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
5437 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
5439 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5440 * depending on the HW type.
5444 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5446 /* special handling for emulation and FPGA,
5447 wait 10 times longer */
5448 if (CHIP_REV_IS_SLOW(bp))
5449 msleep(MCP_ONE_TIMEOUT*10);
5451 msleep(MCP_ONE_TIMEOUT);
5454 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5456 u32 shmem, cnt, validity_offset, val;
5461 /* Get shmem offset */
5462 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5464 BNX2X_ERR("Shmem 0 return failure\n");
5469 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5471 /* Wait for MCP to come up */
5472 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
5473 /* TBD: its best to check validity map of last port.
5474 * currently checks on port 0.
5476 val = REG_RD(bp, shmem + validity_offset);
5477 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5478 shmem + validity_offset, val);
5480 /* check that shared memory is valid. */
5481 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5482 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5485 bnx2x_mcp_wait_one(bp);
5488 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
5490 /* Check that shared memory is valid. This indicates that MCP is up. */
5491 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
5492 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
5493 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
5499 /* Restore the `magic' bit value */
5500 if (!CHIP_IS_E1(bp))
5501 bnx2x_clp_reset_done(bp, magic_val);
5506 static void bnx2x_pxp_prep(struct bnx2x *bp)
5508 if (!CHIP_IS_E1(bp)) {
5509 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5510 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5511 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5517 * Reset the whole chip except for:
5519 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5522 * - MISC (including AEU)
5526 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5528 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5531 MISC_REGISTERS_RESET_REG_1_RST_HC |
5532 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5533 MISC_REGISTERS_RESET_REG_1_RST_PXP;
5536 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5537 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5538 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5539 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5540 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5541 MISC_REGISTERS_RESET_REG_2_RST_GRC |
5542 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5543 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5545 reset_mask1 = 0xffffffff;
5548 reset_mask2 = 0xffff;
5550 reset_mask2 = 0x1ffff;
5552 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5553 reset_mask1 & (~not_reset_mask1));
5554 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5555 reset_mask2 & (~not_reset_mask2));
5560 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5561 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
5565 static int bnx2x_process_kill(struct bnx2x *bp)
5569 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5572 /* Empty the Tetris buffer, wait for 1s */
5574 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5575 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5576 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5577 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5578 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
5579 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5580 ((port_is_idle_0 & 0x1) == 0x1) &&
5581 ((port_is_idle_1 & 0x1) == 0x1) &&
5582 (pgl_exp_rom2 == 0xffffffff))
5585 } while (cnt-- > 0);
5588 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
5590 " outstanding read requests after 1s!\n");
5591 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5592 " port_is_idle_0=0x%08x,"
5593 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5594 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5601 /* Close gates #2, #3 and #4 */
5602 bnx2x_set_234_gates(bp, true);
5604 /* TBD: Indicate that "process kill" is in progress to MCP */
5606 /* Clear "unprepared" bit */
5607 REG_WR(bp, MISC_REG_UNPREPARED, 0);
5610 /* Make sure all is written to the chip before the reset */
5613 /* Wait for 1ms to empty GLUE and PCI-E core queues,
5614 * PSWHST, GRC and PSWRD Tetris buffer.
5618 /* Prepare to chip reset: */
5620 bnx2x_reset_mcp_prep(bp, &val);
5626 /* reset the chip */
5627 bnx2x_process_kill_chip_reset(bp);
5630 /* Recover after reset: */
5632 if (bnx2x_reset_mcp_comp(bp, val))
5638 /* Open the gates #2, #3 and #4 */
5639 bnx2x_set_234_gates(bp, false);
5641 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
5642 * reset state, re-enable attentions. */
5647 static int bnx2x_leader_reset(struct bnx2x *bp)
5650 /* Try to recover after the failure */
5651 if (bnx2x_process_kill(bp)) {
5652 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
5655 goto exit_leader_reset;
5658 /* Clear "reset is in progress" bit and update the driver state */
5659 bnx2x_set_reset_done(bp);
5660 bp->recovery_state = BNX2X_RECOVERY_DONE;
5664 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5669 /* Assumption: runs under rtnl lock. This together with the fact
5670 * that it's called only from bnx2x_reset_task() ensure that it
5671 * will never be called when netif_running(bp->dev) is false.
5673 static void bnx2x_parity_recover(struct bnx2x *bp)
5675 DP(NETIF_MSG_HW, "Handling parity\n");
5677 switch (bp->recovery_state) {
5678 case BNX2X_RECOVERY_INIT:
5679 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5680 /* Try to get a LEADER_LOCK HW lock */
5681 if (bnx2x_trylock_hw_lock(bp,
5682 HW_LOCK_RESOURCE_RESERVED_08))
5685 /* Stop the driver */
5686 /* If interface has been removed - break */
5687 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5690 bp->recovery_state = BNX2X_RECOVERY_WAIT;
5691 /* Ensure "is_leader" and "recovery_state"
5692 * update values are seen on other CPUs
5697 case BNX2X_RECOVERY_WAIT:
5698 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5699 if (bp->is_leader) {
5700 u32 load_counter = bnx2x_get_load_cnt(bp);
5702 /* Wait until all other functions get
5705 schedule_delayed_work(&bp->reset_task,
5709 /* If all other functions got down -
5710 * try to bring the chip back to
5711 * normal. In any case it's an exit
5712 * point for a leader.
5714 if (bnx2x_leader_reset(bp) ||
5715 bnx2x_nic_load(bp, LOAD_NORMAL)) {
5716 printk(KERN_ERR"%s: Recovery "
5717 "has failed. Power cycle is "
5718 "needed.\n", bp->dev->name);
5719 /* Disconnect this device */
5720 netif_device_detach(bp->dev);
5721 /* Block ifup for all function
5722 * of this ASIC until
5723 * "process kill" or power
5726 bnx2x_set_reset_in_progress(bp);
5727 /* Shut down the power */
5728 bnx2x_set_power_state(bp,
5735 } else { /* non-leader */
5736 if (!bnx2x_reset_is_done(bp)) {
5737 /* Try to get a LEADER_LOCK HW lock as
5738 * long as a former leader may have
5739 * been unloaded by the user or
5740 * released a leadership by another
5743 if (bnx2x_trylock_hw_lock(bp,
5744 HW_LOCK_RESOURCE_RESERVED_08)) {
5745 /* I'm a leader now! Restart a
5752 schedule_delayed_work(&bp->reset_task,
5756 } else { /* A leader has completed
5757 * the "process kill". It's an exit
5758 * point for a non-leader.
5760 bnx2x_nic_load(bp, LOAD_NORMAL);
5761 bp->recovery_state =
5762 BNX2X_RECOVERY_DONE;
5773 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
5774 * scheduled on a general queue in order to prevent a dead lock.
5776 static void bnx2x_reset_task(struct work_struct *work)
5778 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
5780 #ifdef BNX2X_STOP_ON_ERROR
5781 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
5782 " so reset not done to allow debug dump,\n"
5783 KERN_ERR " you will need to reboot when done\n");
5789 if (!netif_running(bp->dev))
5790 goto reset_task_exit;
5792 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5793 bnx2x_parity_recover(bp);
5795 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5796 bnx2x_nic_load(bp, LOAD_NORMAL);
5803 /* end of nic load/unload */
5806 * Init service functions
5809 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5812 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5813 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5814 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5815 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5816 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5817 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5818 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5819 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5821 BNX2X_ERR("Unsupported function index: %d\n", func);
5826 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5828 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5830 /* Flush all outstanding writes */
5833 /* Pretend to be function 0 */
5835 /* Flush the GRC transaction (in the chip) */
5836 new_val = REG_RD(bp, reg);
5838 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5843 /* From now we are in the "like-E1" mode */
5844 bnx2x_int_disable(bp);
5846 /* Flush all outstanding writes */
5849 /* Restore the original funtion settings */
5850 REG_WR(bp, reg, orig_func);
5851 new_val = REG_RD(bp, reg);
5852 if (new_val != orig_func) {
5853 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5854 orig_func, new_val);
5859 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
5861 if (CHIP_IS_E1H(bp))
5862 bnx2x_undi_int_disable_e1h(bp, func);
5864 bnx2x_int_disable(bp);
5867 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5871 /* Check if there is any driver already loaded */
5872 val = REG_RD(bp, MISC_REG_UNPREPARED);
5874 /* Check if it is the UNDI driver
5875 * UNDI driver initializes CID offset for normal bell to 0x7
5877 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5878 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5880 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5882 int func = BP_FUNC(bp);
5886 /* clear the UNDI indication */
5887 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5889 BNX2X_DEV_INFO("UNDI is active! reset device\n");
5891 /* try unload UNDI on port 0 */
5894 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5895 DRV_MSG_SEQ_NUMBER_MASK);
5896 reset_code = bnx2x_fw_command(bp, reset_code);
5898 /* if UNDI is loaded on the other port */
5899 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5901 /* send "DONE" for previous unload */
5902 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5904 /* unload UNDI on port 1 */
5907 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5908 DRV_MSG_SEQ_NUMBER_MASK);
5909 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5911 bnx2x_fw_command(bp, reset_code);
5914 /* now it's safe to release the lock */
5915 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5917 bnx2x_undi_int_disable(bp, func);
5919 /* close input traffic and wait for it */
5920 /* Do not rcv packets to BRB */
5922 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5923 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5924 /* Do not direct rcv packets that are not for MCP to
5927 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5928 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5931 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5932 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5935 /* save NIG port swap info */
5936 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5937 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5940 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5943 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5945 /* take the NIG out of reset and restore swap values */
5947 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5948 MISC_REGISTERS_RESET_REG_1_RST_NIG);
5949 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5950 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5952 /* send unload done to the MCP */
5953 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5955 /* restore our func and fw_seq */
5958 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5959 DRV_MSG_SEQ_NUMBER_MASK);
5962 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5966 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5968 u32 val, val2, val3, val4, id;
5971 /* Get the chip revision id and number. */
5972 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5973 val = REG_RD(bp, MISC_REG_CHIP_NUM);
5974 id = ((val & 0xffff) << 16);
5975 val = REG_RD(bp, MISC_REG_CHIP_REV);
5976 id |= ((val & 0xf) << 12);
5977 val = REG_RD(bp, MISC_REG_CHIP_METAL);
5978 id |= ((val & 0xff) << 4);
5979 val = REG_RD(bp, MISC_REG_BOND_ID);
5981 bp->common.chip_id = id;
5982 bp->link_params.chip_id = bp->common.chip_id;
5983 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5985 val = (REG_RD(bp, 0x2874) & 0x55);
5986 if ((bp->common.chip_id & 0x1) ||
5987 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5988 bp->flags |= ONE_PORT_FLAG;
5989 BNX2X_DEV_INFO("single port device\n");
5992 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5993 bp->common.flash_size = (NVRAM_1MB_SIZE <<
5994 (val & MCPR_NVM_CFG4_FLASH_SIZE));
5995 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5996 bp->common.flash_size, bp->common.flash_size);
5998 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5999 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
6000 bp->link_params.shmem_base = bp->common.shmem_base;
6001 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
6002 bp->common.shmem_base, bp->common.shmem2_base);
6004 if (!bp->common.shmem_base ||
6005 (bp->common.shmem_base < 0xA0000) ||
6006 (bp->common.shmem_base >= 0xC0000)) {
6007 BNX2X_DEV_INFO("MCP not active\n");
6008 bp->flags |= NO_MCP_FLAG;
6012 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6013 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6014 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6015 BNX2X_ERROR("BAD MCP validity signature\n");
6017 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6018 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
6020 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6021 SHARED_HW_CFG_LED_MODE_MASK) >>
6022 SHARED_HW_CFG_LED_MODE_SHIFT);
6024 bp->link_params.feature_config_flags = 0;
6025 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
6026 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
6027 bp->link_params.feature_config_flags |=
6028 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6030 bp->link_params.feature_config_flags &=
6031 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6033 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6034 bp->common.bc_ver = val;
6035 BNX2X_DEV_INFO("bc_ver %X\n", val);
6036 if (val < BNX2X_BC_VER) {
6037 /* for now only warn
6038 * later we might need to enforce this */
6039 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
6040 "please upgrade BC\n", BNX2X_BC_VER, val);
6042 bp->link_params.feature_config_flags |=
6043 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
6044 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
6046 if (BP_E1HVN(bp) == 0) {
6047 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6048 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6050 /* no WOL capability for E1HVN != 0 */
6051 bp->flags |= NO_WOL_FLAG;
6053 BNX2X_DEV_INFO("%sWoL capable\n",
6054 (bp->flags & NO_WOL_FLAG) ? "not " : "");
6056 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6057 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6058 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6059 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6061 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
6062 val, val2, val3, val4);
6065 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6068 int port = BP_PORT(bp);
6071 switch (switch_cfg) {
6073 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6076 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6077 switch (ext_phy_type) {
6078 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6079 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6082 bp->port.supported |= (SUPPORTED_10baseT_Half |
6083 SUPPORTED_10baseT_Full |
6084 SUPPORTED_100baseT_Half |
6085 SUPPORTED_100baseT_Full |
6086 SUPPORTED_1000baseT_Full |
6087 SUPPORTED_2500baseX_Full |
6092 SUPPORTED_Asym_Pause);
6095 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6096 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6099 bp->port.supported |= (SUPPORTED_10baseT_Half |
6100 SUPPORTED_10baseT_Full |
6101 SUPPORTED_100baseT_Half |
6102 SUPPORTED_100baseT_Full |
6103 SUPPORTED_1000baseT_Full |
6108 SUPPORTED_Asym_Pause);
6112 BNX2X_ERR("NVRAM config error. "
6113 "BAD SerDes ext_phy_config 0x%x\n",
6114 bp->link_params.ext_phy_config);
6118 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6120 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6123 case SWITCH_CFG_10G:
6124 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6127 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6128 switch (ext_phy_type) {
6129 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6130 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6133 bp->port.supported |= (SUPPORTED_10baseT_Half |
6134 SUPPORTED_10baseT_Full |
6135 SUPPORTED_100baseT_Half |
6136 SUPPORTED_100baseT_Full |
6137 SUPPORTED_1000baseT_Full |
6138 SUPPORTED_2500baseX_Full |
6139 SUPPORTED_10000baseT_Full |
6144 SUPPORTED_Asym_Pause);
6147 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6148 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
6151 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6152 SUPPORTED_1000baseT_Full |
6156 SUPPORTED_Asym_Pause);
6159 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6160 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6163 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6164 SUPPORTED_2500baseX_Full |
6165 SUPPORTED_1000baseT_Full |
6169 SUPPORTED_Asym_Pause);
6172 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6173 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6176 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6179 SUPPORTED_Asym_Pause);
6182 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6183 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6186 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6187 SUPPORTED_1000baseT_Full |
6190 SUPPORTED_Asym_Pause);
6193 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6194 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
6197 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6198 SUPPORTED_1000baseT_Full |
6202 SUPPORTED_Asym_Pause);
6205 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6206 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
6209 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6210 SUPPORTED_1000baseT_Full |
6214 SUPPORTED_Asym_Pause);
6217 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6218 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6221 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6225 SUPPORTED_Asym_Pause);
6228 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
6229 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
6230 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM848xx)\n",
6233 bp->port.supported |= (SUPPORTED_10baseT_Half |
6234 SUPPORTED_10baseT_Full |
6235 SUPPORTED_100baseT_Half |
6236 SUPPORTED_100baseT_Full |
6237 SUPPORTED_1000baseT_Full |
6238 SUPPORTED_10000baseT_Full |
6242 SUPPORTED_Asym_Pause);
6245 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6246 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6247 bp->link_params.ext_phy_config);
6251 BNX2X_ERR("NVRAM config error. "
6252 "BAD XGXS ext_phy_config 0x%x\n",
6253 bp->link_params.ext_phy_config);
6257 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6259 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6264 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6265 bp->port.link_config);
6268 bp->link_params.phy_addr = bp->port.phy_addr;
6270 /* mask what we support according to speed_cap_mask */
6271 if (!(bp->link_params.speed_cap_mask &
6272 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6273 bp->port.supported &= ~SUPPORTED_10baseT_Half;
6275 if (!(bp->link_params.speed_cap_mask &
6276 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6277 bp->port.supported &= ~SUPPORTED_10baseT_Full;
6279 if (!(bp->link_params.speed_cap_mask &
6280 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6281 bp->port.supported &= ~SUPPORTED_100baseT_Half;
6283 if (!(bp->link_params.speed_cap_mask &
6284 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6285 bp->port.supported &= ~SUPPORTED_100baseT_Full;
6287 if (!(bp->link_params.speed_cap_mask &
6288 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6289 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
6290 SUPPORTED_1000baseT_Full);
6292 if (!(bp->link_params.speed_cap_mask &
6293 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6294 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
6296 if (!(bp->link_params.speed_cap_mask &
6297 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6298 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
6300 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
6303 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6305 bp->link_params.req_duplex = DUPLEX_FULL;
6307 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6308 case PORT_FEATURE_LINK_SPEED_AUTO:
6309 if (bp->port.supported & SUPPORTED_Autoneg) {
6310 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6311 bp->port.advertising = bp->port.supported;
6314 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6316 if ((ext_phy_type ==
6317 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6319 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
6320 /* force 10G, no AN */
6321 bp->link_params.req_line_speed = SPEED_10000;
6322 bp->port.advertising =
6323 (ADVERTISED_10000baseT_Full |
6327 BNX2X_ERR("NVRAM config error. "
6328 "Invalid link_config 0x%x"
6329 " Autoneg not supported\n",
6330 bp->port.link_config);
6335 case PORT_FEATURE_LINK_SPEED_10M_FULL:
6336 if (bp->port.supported & SUPPORTED_10baseT_Full) {
6337 bp->link_params.req_line_speed = SPEED_10;
6338 bp->port.advertising = (ADVERTISED_10baseT_Full |
6341 BNX2X_ERROR("NVRAM config error. "
6342 "Invalid link_config 0x%x"
6343 " speed_cap_mask 0x%x\n",
6344 bp->port.link_config,
6345 bp->link_params.speed_cap_mask);
6350 case PORT_FEATURE_LINK_SPEED_10M_HALF:
6351 if (bp->port.supported & SUPPORTED_10baseT_Half) {
6352 bp->link_params.req_line_speed = SPEED_10;
6353 bp->link_params.req_duplex = DUPLEX_HALF;
6354 bp->port.advertising = (ADVERTISED_10baseT_Half |
6357 BNX2X_ERROR("NVRAM config error. "
6358 "Invalid link_config 0x%x"
6359 " speed_cap_mask 0x%x\n",
6360 bp->port.link_config,
6361 bp->link_params.speed_cap_mask);
6366 case PORT_FEATURE_LINK_SPEED_100M_FULL:
6367 if (bp->port.supported & SUPPORTED_100baseT_Full) {
6368 bp->link_params.req_line_speed = SPEED_100;
6369 bp->port.advertising = (ADVERTISED_100baseT_Full |
6372 BNX2X_ERROR("NVRAM config error. "
6373 "Invalid link_config 0x%x"
6374 " speed_cap_mask 0x%x\n",
6375 bp->port.link_config,
6376 bp->link_params.speed_cap_mask);
6381 case PORT_FEATURE_LINK_SPEED_100M_HALF:
6382 if (bp->port.supported & SUPPORTED_100baseT_Half) {
6383 bp->link_params.req_line_speed = SPEED_100;
6384 bp->link_params.req_duplex = DUPLEX_HALF;
6385 bp->port.advertising = (ADVERTISED_100baseT_Half |
6388 BNX2X_ERROR("NVRAM config error. "
6389 "Invalid link_config 0x%x"
6390 " speed_cap_mask 0x%x\n",
6391 bp->port.link_config,
6392 bp->link_params.speed_cap_mask);
6397 case PORT_FEATURE_LINK_SPEED_1G:
6398 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
6399 bp->link_params.req_line_speed = SPEED_1000;
6400 bp->port.advertising = (ADVERTISED_1000baseT_Full |
6403 BNX2X_ERROR("NVRAM config error. "
6404 "Invalid link_config 0x%x"
6405 " speed_cap_mask 0x%x\n",
6406 bp->port.link_config,
6407 bp->link_params.speed_cap_mask);
6412 case PORT_FEATURE_LINK_SPEED_2_5G:
6413 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
6414 bp->link_params.req_line_speed = SPEED_2500;
6415 bp->port.advertising = (ADVERTISED_2500baseX_Full |
6418 BNX2X_ERROR("NVRAM config error. "
6419 "Invalid link_config 0x%x"
6420 " speed_cap_mask 0x%x\n",
6421 bp->port.link_config,
6422 bp->link_params.speed_cap_mask);
6427 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6428 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6429 case PORT_FEATURE_LINK_SPEED_10G_KR:
6430 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
6431 bp->link_params.req_line_speed = SPEED_10000;
6432 bp->port.advertising = (ADVERTISED_10000baseT_Full |
6435 BNX2X_ERROR("NVRAM config error. "
6436 "Invalid link_config 0x%x"
6437 " speed_cap_mask 0x%x\n",
6438 bp->port.link_config,
6439 bp->link_params.speed_cap_mask);
6445 BNX2X_ERROR("NVRAM config error. "
6446 "BAD link speed link_config 0x%x\n",
6447 bp->port.link_config);
6448 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6449 bp->port.advertising = bp->port.supported;
6453 bp->link_params.req_flow_ctrl = (bp->port.link_config &
6454 PORT_FEATURE_FLOW_CONTROL_MASK);
6455 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
6456 !(bp->port.supported & SUPPORTED_Autoneg))
6457 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6459 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
6460 " advertising 0x%x\n",
6461 bp->link_params.req_line_speed,
6462 bp->link_params.req_duplex,
6463 bp->link_params.req_flow_ctrl, bp->port.advertising);
6466 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6468 mac_hi = cpu_to_be16(mac_hi);
6469 mac_lo = cpu_to_be32(mac_lo);
6470 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6471 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
6474 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6476 int port = BP_PORT(bp);
6482 bp->link_params.bp = bp;
6483 bp->link_params.port = port;
6485 bp->link_params.lane_config =
6486 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6487 bp->link_params.ext_phy_config =
6489 dev_info.port_hw_config[port].external_phy_config);
6490 /* BCM8727_NOC => BCM8727 no over current */
6491 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
6492 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
6493 bp->link_params.ext_phy_config &=
6494 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6495 bp->link_params.ext_phy_config |=
6496 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
6497 bp->link_params.feature_config_flags |=
6498 FEATURE_CONFIG_BCM8727_NOC;
6501 bp->link_params.speed_cap_mask =
6503 dev_info.port_hw_config[port].speed_capability_mask);
6505 bp->port.link_config =
6506 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6508 /* Get the 4 lanes xgxs config rx and tx */
6509 for (i = 0; i < 2; i++) {
6511 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
6512 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
6513 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
6516 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
6517 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
6518 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
6521 /* If the device is capable of WoL, set the default state according
6524 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
6525 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6526 (config & PORT_FEATURE_WOL_ENABLED));
6528 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
6529 " speed_cap_mask 0x%08x link_config 0x%08x\n",
6530 bp->link_params.lane_config,
6531 bp->link_params.ext_phy_config,
6532 bp->link_params.speed_cap_mask, bp->port.link_config);
6534 bp->link_params.switch_cfg |= (bp->port.link_config &
6535 PORT_FEATURE_CONNECTED_SWITCH_MASK);
6536 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
6538 bnx2x_link_settings_requested(bp);
6541 * If connected directly, work with the internal PHY, otherwise, work
6542 * with the external PHY
6544 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6545 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6546 bp->mdio.prtad = bp->link_params.phy_addr;
6548 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6549 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6551 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
6553 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6554 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6555 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
6556 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6557 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6560 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6561 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6562 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
6566 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6568 int func = BP_FUNC(bp);
6572 bnx2x_get_common_hwinfo(bp);
6576 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
6578 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
6580 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
6581 FUNC_MF_CFG_E1HOV_TAG_MASK);
6582 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
6584 BNX2X_DEV_INFO("%s function mode\n",
6585 IS_E1HMF(bp) ? "multi" : "single");
6588 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
6590 FUNC_MF_CFG_E1HOV_TAG_MASK);
6591 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6593 BNX2X_DEV_INFO("E1HOV for func %d is %d "
6595 func, bp->e1hov, bp->e1hov);
6597 BNX2X_ERROR("No valid E1HOV for func %d,"
6598 " aborting\n", func);
6603 BNX2X_ERROR("VN %d in single function mode,"
6604 " aborting\n", BP_E1HVN(bp));
6610 if (!BP_NOMCP(bp)) {
6611 bnx2x_get_port_hwinfo(bp);
6613 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6614 DRV_MSG_SEQ_NUMBER_MASK);
6615 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6619 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6620 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
6621 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6622 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6623 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6624 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6625 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6626 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6627 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6628 bp->dev->dev_addr[5] = (u8)(val & 0xff);
6629 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6631 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6639 /* only supposed to happen on emulation/FPGA */
6640 BNX2X_ERROR("warning: random MAC workaround active\n");
6641 random_ether_addr(bp->dev->dev_addr);
6642 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6648 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
6650 int cnt, i, block_end, rodi;
6651 char vpd_data[BNX2X_VPD_LEN+1];
6652 char str_id_reg[VENDOR_ID_LEN+1];
6653 char str_id_cap[VENDOR_ID_LEN+1];
6656 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
6657 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
6659 if (cnt < BNX2X_VPD_LEN)
6662 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
6663 PCI_VPD_LRDT_RO_DATA);
6668 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
6669 pci_vpd_lrdt_size(&vpd_data[i]);
6671 i += PCI_VPD_LRDT_TAG_SIZE;
6673 if (block_end > BNX2X_VPD_LEN)
6676 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6677 PCI_VPD_RO_KEYWORD_MFR_ID);
6681 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6683 if (len != VENDOR_ID_LEN)
6686 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6688 /* vendor specific info */
6689 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
6690 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
6691 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
6692 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
6694 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6695 PCI_VPD_RO_KEYWORD_VENDOR0);
6697 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6699 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6701 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
6702 memcpy(bp->fw_ver, &vpd_data[rodi], len);
6703 bp->fw_ver[len] = ' ';
6712 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6714 int func = BP_FUNC(bp);
6718 /* Disable interrupt handling until HW is initialized */
6719 atomic_set(&bp->intr_sem, 1);
6720 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6722 mutex_init(&bp->port.phy_mutex);
6723 mutex_init(&bp->fw_mb_mutex);
6724 spin_lock_init(&bp->stats_lock);
6726 mutex_init(&bp->cnic_mutex);
6729 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
6730 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
6732 rc = bnx2x_get_hwinfo(bp);
6734 bnx2x_read_fwinfo(bp);
6735 /* need to reset chip if undi was active */
6737 bnx2x_undi_unload(bp);
6739 if (CHIP_REV_IS_FPGA(bp))
6740 dev_err(&bp->pdev->dev, "FPGA detected\n");
6742 if (BP_NOMCP(bp) && (func == 0))
6743 dev_err(&bp->pdev->dev, "MCP disabled, "
6744 "must load devices in order!\n");
6746 /* Set multi queue mode */
6747 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
6748 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
6749 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
6750 "requested is not MSI-X\n");
6751 multi_mode = ETH_RSS_MODE_DISABLED;
6753 bp->multi_mode = multi_mode;
6754 bp->int_mode = int_mode;
6756 bp->dev->features |= NETIF_F_GRO;
6760 bp->flags &= ~TPA_ENABLE_FLAG;
6761 bp->dev->features &= ~NETIF_F_LRO;
6763 bp->flags |= TPA_ENABLE_FLAG;
6764 bp->dev->features |= NETIF_F_LRO;
6766 bp->disable_tpa = disable_tpa;
6769 bp->dropless_fc = 0;
6771 bp->dropless_fc = dropless_fc;
6775 bp->tx_ring_size = MAX_TX_AVAIL;
6776 bp->rx_ring_size = MAX_RX_AVAIL;
6780 /* make sure that the numbers are in the right granularity */
6781 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6782 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6784 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6785 bp->current_interval = (poll ? poll : timer_interval);
6787 init_timer(&bp->timer);
6788 bp->timer.expires = jiffies + bp->current_interval;
6789 bp->timer.data = (unsigned long) bp;
6790 bp->timer.function = bnx2x_timer;
6796 /****************************************************************************
6797 * General service functions
6798 ****************************************************************************/
6800 /* called with rtnl_lock */
6801 static int bnx2x_open(struct net_device *dev)
6803 struct bnx2x *bp = netdev_priv(dev);
6805 netif_carrier_off(dev);
6807 bnx2x_set_power_state(bp, PCI_D0);
6809 if (!bnx2x_reset_is_done(bp)) {
6811 /* Reset MCP mail box sequence if there is on going
6816 /* If it's the first function to load and reset done
6817 * is still not cleared it may mean that. We don't
6818 * check the attention state here because it may have
6819 * already been cleared by a "common" reset but we
6820 * shell proceed with "process kill" anyway.
6822 if ((bnx2x_get_load_cnt(bp) == 0) &&
6823 bnx2x_trylock_hw_lock(bp,
6824 HW_LOCK_RESOURCE_RESERVED_08) &&
6825 (!bnx2x_leader_reset(bp))) {
6826 DP(NETIF_MSG_HW, "Recovered in open\n");
6830 bnx2x_set_power_state(bp, PCI_D3hot);
6832 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
6833 " completed yet. Try again later. If u still see this"
6834 " message after a few retries then power cycle is"
6835 " required.\n", bp->dev->name);
6841 bp->recovery_state = BNX2X_RECOVERY_DONE;
6843 return bnx2x_nic_load(bp, LOAD_OPEN);
6846 /* called with rtnl_lock */
6847 static int bnx2x_close(struct net_device *dev)
6849 struct bnx2x *bp = netdev_priv(dev);
6851 /* Unload the driver, release IRQs */
6852 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
6853 bnx2x_set_power_state(bp, PCI_D3hot);
6858 /* called with netif_tx_lock from dev_mcast.c */
6859 void bnx2x_set_rx_mode(struct net_device *dev)
6861 struct bnx2x *bp = netdev_priv(dev);
6862 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6863 int port = BP_PORT(bp);
6865 if (bp->state != BNX2X_STATE_OPEN) {
6866 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6870 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
6872 if (dev->flags & IFF_PROMISC)
6873 rx_mode = BNX2X_RX_MODE_PROMISC;
6875 else if ((dev->flags & IFF_ALLMULTI) ||
6876 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6878 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6880 else { /* some multicasts */
6881 if (CHIP_IS_E1(bp)) {
6883 struct netdev_hw_addr *ha;
6884 struct mac_configuration_cmd *config =
6885 bnx2x_sp(bp, mcast_config);
6888 netdev_for_each_mc_addr(ha, dev) {
6889 config->config_table[i].
6890 cam_entry.msb_mac_addr =
6891 swab16(*(u16 *)&ha->addr[0]);
6892 config->config_table[i].
6893 cam_entry.middle_mac_addr =
6894 swab16(*(u16 *)&ha->addr[2]);
6895 config->config_table[i].
6896 cam_entry.lsb_mac_addr =
6897 swab16(*(u16 *)&ha->addr[4]);
6898 config->config_table[i].cam_entry.flags =
6900 config->config_table[i].
6901 target_table_entry.flags = 0;
6902 config->config_table[i].target_table_entry.
6903 clients_bit_vector =
6904 cpu_to_le32(1 << BP_L_ID(bp));
6905 config->config_table[i].
6906 target_table_entry.vlan_id = 0;
6909 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6910 config->config_table[i].
6911 cam_entry.msb_mac_addr,
6912 config->config_table[i].
6913 cam_entry.middle_mac_addr,
6914 config->config_table[i].
6915 cam_entry.lsb_mac_addr);
6918 old = config->hdr.length;
6920 for (; i < old; i++) {
6921 if (CAM_IS_INVALID(config->
6923 /* already invalidated */
6927 CAM_INVALIDATE(config->
6932 if (CHIP_REV_IS_SLOW(bp))
6933 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6935 offset = BNX2X_MAX_MULTICAST*(1 + port);
6937 config->hdr.length = i;
6938 config->hdr.offset = offset;
6939 config->hdr.client_id = bp->fp->cl_id;
6940 config->hdr.reserved1 = 0;
6942 bp->set_mac_pending++;
6945 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6946 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6947 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6950 /* Accept one or more multicasts */
6951 struct netdev_hw_addr *ha;
6952 u32 mc_filter[MC_HASH_SIZE];
6953 u32 crc, bit, regidx;
6956 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6958 netdev_for_each_mc_addr(ha, dev) {
6959 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6962 crc = crc32c_le(0, ha->addr, ETH_ALEN);
6963 bit = (crc >> 24) & 0xff;
6966 mc_filter[regidx] |= (1 << bit);
6969 for (i = 0; i < MC_HASH_SIZE; i++)
6970 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6975 bp->rx_mode = rx_mode;
6976 bnx2x_set_storm_rx_mode(bp);
6980 /* called with rtnl_lock */
6981 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6982 int devad, u16 addr)
6984 struct bnx2x *bp = netdev_priv(netdev);
6987 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6989 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6990 prtad, devad, addr);
6992 if (prtad != bp->mdio.prtad) {
6993 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
6994 prtad, bp->mdio.prtad);
6998 /* The HW expects different devad if CL22 is used */
6999 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7001 bnx2x_acquire_phy_lock(bp);
7002 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
7003 devad, addr, &value);
7004 bnx2x_release_phy_lock(bp);
7005 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
7012 /* called with rtnl_lock */
7013 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
7014 u16 addr, u16 value)
7016 struct bnx2x *bp = netdev_priv(netdev);
7017 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7020 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
7021 " value 0x%x\n", prtad, devad, addr, value);
7023 if (prtad != bp->mdio.prtad) {
7024 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
7025 prtad, bp->mdio.prtad);
7029 /* The HW expects different devad if CL22 is used */
7030 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7032 bnx2x_acquire_phy_lock(bp);
7033 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
7034 devad, addr, value);
7035 bnx2x_release_phy_lock(bp);
7039 /* called with rtnl_lock */
7040 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7042 struct bnx2x *bp = netdev_priv(dev);
7043 struct mii_ioctl_data *mdio = if_mii(ifr);
7045 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
7046 mdio->phy_id, mdio->reg_num, mdio->val_in);
7048 if (!netif_running(dev))
7051 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
7054 #ifdef CONFIG_NET_POLL_CONTROLLER
7055 static void poll_bnx2x(struct net_device *dev)
7057 struct bnx2x *bp = netdev_priv(dev);
7059 disable_irq(bp->pdev->irq);
7060 bnx2x_interrupt(bp->pdev->irq, dev);
7061 enable_irq(bp->pdev->irq);
7065 static const struct net_device_ops bnx2x_netdev_ops = {
7066 .ndo_open = bnx2x_open,
7067 .ndo_stop = bnx2x_close,
7068 .ndo_start_xmit = bnx2x_start_xmit,
7069 .ndo_set_multicast_list = bnx2x_set_rx_mode,
7070 .ndo_set_mac_address = bnx2x_change_mac_addr,
7071 .ndo_validate_addr = eth_validate_addr,
7072 .ndo_do_ioctl = bnx2x_ioctl,
7073 .ndo_change_mtu = bnx2x_change_mtu,
7074 .ndo_tx_timeout = bnx2x_tx_timeout,
7076 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
7078 #ifdef CONFIG_NET_POLL_CONTROLLER
7079 .ndo_poll_controller = poll_bnx2x,
7083 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7084 struct net_device *dev)
7089 SET_NETDEV_DEV(dev, &pdev->dev);
7090 bp = netdev_priv(dev);
7095 bp->func = PCI_FUNC(pdev->devfn);
7097 rc = pci_enable_device(pdev);
7099 dev_err(&bp->pdev->dev,
7100 "Cannot enable PCI device, aborting\n");
7104 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7105 dev_err(&bp->pdev->dev,
7106 "Cannot find PCI device base address, aborting\n");
7108 goto err_out_disable;
7111 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
7112 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
7113 " base address, aborting\n");
7115 goto err_out_disable;
7118 if (atomic_read(&pdev->enable_cnt) == 1) {
7119 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7121 dev_err(&bp->pdev->dev,
7122 "Cannot obtain PCI resources, aborting\n");
7123 goto err_out_disable;
7126 pci_set_master(pdev);
7127 pci_save_state(pdev);
7130 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7131 if (bp->pm_cap == 0) {
7132 dev_err(&bp->pdev->dev,
7133 "Cannot find power management capability, aborting\n");
7135 goto err_out_release;
7138 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
7139 if (bp->pcie_cap == 0) {
7140 dev_err(&bp->pdev->dev,
7141 "Cannot find PCI Express capability, aborting\n");
7143 goto err_out_release;
7146 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
7147 bp->flags |= USING_DAC_FLAG;
7148 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
7149 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
7150 " failed, aborting\n");
7152 goto err_out_release;
7155 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
7156 dev_err(&bp->pdev->dev,
7157 "System does not support DMA, aborting\n");
7159 goto err_out_release;
7162 dev->mem_start = pci_resource_start(pdev, 0);
7163 dev->base_addr = dev->mem_start;
7164 dev->mem_end = pci_resource_end(pdev, 0);
7166 dev->irq = pdev->irq;
7168 bp->regview = pci_ioremap_bar(pdev, 0);
7170 dev_err(&bp->pdev->dev,
7171 "Cannot map register space, aborting\n");
7173 goto err_out_release;
7176 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
7177 min_t(u64, BNX2X_DB_SIZE,
7178 pci_resource_len(pdev, 2)));
7179 if (!bp->doorbells) {
7180 dev_err(&bp->pdev->dev,
7181 "Cannot map doorbell space, aborting\n");
7186 bnx2x_set_power_state(bp, PCI_D0);
7188 /* clean indirect addresses */
7189 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
7190 PCICFG_VENDOR_ID_OFFSET);
7191 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
7192 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
7193 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
7194 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
7196 /* Reset the load counter */
7197 bnx2x_clear_load_cnt(bp);
7199 dev->watchdog_timeo = TX_TIMEOUT;
7201 dev->netdev_ops = &bnx2x_netdev_ops;
7202 bnx2x_set_ethtool_ops(dev);
7203 dev->features |= NETIF_F_SG;
7204 dev->features |= NETIF_F_HW_CSUM;
7205 if (bp->flags & USING_DAC_FLAG)
7206 dev->features |= NETIF_F_HIGHDMA;
7207 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7208 dev->features |= NETIF_F_TSO6;
7210 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
7211 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
7213 dev->vlan_features |= NETIF_F_SG;
7214 dev->vlan_features |= NETIF_F_HW_CSUM;
7215 if (bp->flags & USING_DAC_FLAG)
7216 dev->vlan_features |= NETIF_F_HIGHDMA;
7217 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7218 dev->vlan_features |= NETIF_F_TSO6;
7221 /* get_port_hwinfo() will set prtad and mmds properly */
7222 bp->mdio.prtad = MDIO_PRTAD_NONE;
7224 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7226 bp->mdio.mdio_read = bnx2x_mdio_read;
7227 bp->mdio.mdio_write = bnx2x_mdio_write;
7233 iounmap(bp->regview);
7236 if (bp->doorbells) {
7237 iounmap(bp->doorbells);
7238 bp->doorbells = NULL;
7242 if (atomic_read(&pdev->enable_cnt) == 1)
7243 pci_release_regions(pdev);
7246 pci_disable_device(pdev);
7247 pci_set_drvdata(pdev, NULL);
7253 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7254 int *width, int *speed)
7256 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7258 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
7260 /* return value of 1=2.5GHz 2=5GHz */
7261 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
7264 static int bnx2x_check_firmware(struct bnx2x *bp)
7266 const struct firmware *firmware = bp->firmware;
7267 struct bnx2x_fw_file_hdr *fw_hdr;
7268 struct bnx2x_fw_file_section *sections;
7269 u32 offset, len, num_ops;
7274 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7277 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
7278 sections = (struct bnx2x_fw_file_section *)fw_hdr;
7280 /* Make sure none of the offsets and sizes make us read beyond
7281 * the end of the firmware data */
7282 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7283 offset = be32_to_cpu(sections[i].offset);
7284 len = be32_to_cpu(sections[i].len);
7285 if (offset + len > firmware->size) {
7286 dev_err(&bp->pdev->dev,
7287 "Section %d length is out of bounds\n", i);
7292 /* Likewise for the init_ops offsets */
7293 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7294 ops_offsets = (u16 *)(firmware->data + offset);
7295 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7297 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7298 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
7299 dev_err(&bp->pdev->dev,
7300 "Section offset %d is out of bounds\n", i);
7305 /* Check FW version */
7306 offset = be32_to_cpu(fw_hdr->fw_version.offset);
7307 fw_ver = firmware->data + offset;
7308 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7309 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7310 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7311 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
7312 dev_err(&bp->pdev->dev,
7313 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
7314 fw_ver[0], fw_ver[1], fw_ver[2],
7315 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7316 BCM_5710_FW_MINOR_VERSION,
7317 BCM_5710_FW_REVISION_VERSION,
7318 BCM_5710_FW_ENGINEERING_VERSION);
7325 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7327 const __be32 *source = (const __be32 *)_source;
7328 u32 *target = (u32 *)_target;
7331 for (i = 0; i < n/4; i++)
7332 target[i] = be32_to_cpu(source[i]);
7336 Ops array is stored in the following format:
7337 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
7339 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
7341 const __be32 *source = (const __be32 *)_source;
7342 struct raw_op *target = (struct raw_op *)_target;
7345 for (i = 0, j = 0; i < n/8; i++, j += 2) {
7346 tmp = be32_to_cpu(source[j]);
7347 target[i].op = (tmp >> 24) & 0xff;
7348 target[i].offset = tmp & 0xffffff;
7349 target[i].raw_data = be32_to_cpu(source[j + 1]);
7353 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7355 const __be16 *source = (const __be16 *)_source;
7356 u16 *target = (u16 *)_target;
7359 for (i = 0; i < n/2; i++)
7360 target[i] = be16_to_cpu(source[i]);
7363 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
7365 u32 len = be32_to_cpu(fw_hdr->arr.len); \
7366 bp->arr = kmalloc(len, GFP_KERNEL); \
7368 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
7371 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
7372 (u8 *)bp->arr, len); \
7375 int bnx2x_init_firmware(struct bnx2x *bp)
7377 const char *fw_file_name;
7378 struct bnx2x_fw_file_hdr *fw_hdr;
7382 fw_file_name = FW_FILE_NAME_E1;
7383 else if (CHIP_IS_E1H(bp))
7384 fw_file_name = FW_FILE_NAME_E1H;
7386 BNX2X_ERR("Unsupported chip revision\n");
7390 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
7392 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
7394 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
7395 goto request_firmware_exit;
7398 rc = bnx2x_check_firmware(bp);
7400 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
7401 goto request_firmware_exit;
7404 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
7406 /* Initialize the pointers to the init arrays */
7408 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
7411 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
7414 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
7417 /* STORMs firmware */
7418 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7419 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
7420 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
7421 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
7422 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7423 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
7424 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
7425 be32_to_cpu(fw_hdr->usem_pram_data.offset);
7426 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7427 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
7428 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
7429 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
7430 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7431 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
7432 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
7433 be32_to_cpu(fw_hdr->csem_pram_data.offset);
7437 init_offsets_alloc_err:
7438 kfree(bp->init_ops);
7440 kfree(bp->init_data);
7441 request_firmware_exit:
7442 release_firmware(bp->firmware);
7448 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7449 const struct pci_device_id *ent)
7451 struct net_device *dev = NULL;
7453 int pcie_width, pcie_speed;
7456 /* dev zeroed in init_etherdev */
7457 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
7459 dev_err(&pdev->dev, "Cannot allocate net device\n");
7463 bp = netdev_priv(dev);
7464 bp->msg_enable = debug;
7466 pci_set_drvdata(pdev, dev);
7468 rc = bnx2x_init_dev(pdev, dev);
7474 rc = bnx2x_init_bp(bp);
7478 rc = register_netdev(dev);
7480 dev_err(&pdev->dev, "Cannot register net device\n");
7484 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
7485 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
7486 " IRQ %d, ", board_info[ent->driver_data].name,
7487 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
7488 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
7489 dev->base_addr, bp->pdev->irq);
7490 pr_cont("node addr %pM\n", dev->dev_addr);
7496 iounmap(bp->regview);
7499 iounmap(bp->doorbells);
7503 if (atomic_read(&pdev->enable_cnt) == 1)
7504 pci_release_regions(pdev);
7506 pci_disable_device(pdev);
7507 pci_set_drvdata(pdev, NULL);
7512 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7514 struct net_device *dev = pci_get_drvdata(pdev);
7518 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
7521 bp = netdev_priv(dev);
7523 unregister_netdev(dev);
7525 /* Make sure RESET task is not scheduled before continuing */
7526 cancel_delayed_work_sync(&bp->reset_task);
7529 iounmap(bp->regview);
7532 iounmap(bp->doorbells);
7536 if (atomic_read(&pdev->enable_cnt) == 1)
7537 pci_release_regions(pdev);
7539 pci_disable_device(pdev);
7540 pci_set_drvdata(pdev, NULL);
7543 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
7547 bp->state = BNX2X_STATE_ERROR;
7549 bp->rx_mode = BNX2X_RX_MODE_NONE;
7551 bnx2x_netif_stop(bp, 0);
7552 netif_carrier_off(bp->dev);
7554 del_timer_sync(&bp->timer);
7555 bp->stats_state = STATS_STATE_DISABLED;
7556 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
7559 bnx2x_free_irq(bp, false);
7561 if (CHIP_IS_E1(bp)) {
7562 struct mac_configuration_cmd *config =
7563 bnx2x_sp(bp, mcast_config);
7565 for (i = 0; i < config->hdr.length; i++)
7566 CAM_INVALIDATE(config->config_table[i]);
7569 /* Free SKBs, SGEs, TPA pool and driver internals */
7570 bnx2x_free_skbs(bp);
7571 for_each_queue(bp, i)
7572 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7573 for_each_queue(bp, i)
7574 netif_napi_del(&bnx2x_fp(bp, i, napi));
7577 bp->state = BNX2X_STATE_CLOSED;
7582 static void bnx2x_eeh_recover(struct bnx2x *bp)
7586 mutex_init(&bp->port.phy_mutex);
7588 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7589 bp->link_params.shmem_base = bp->common.shmem_base;
7590 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7592 if (!bp->common.shmem_base ||
7593 (bp->common.shmem_base < 0xA0000) ||
7594 (bp->common.shmem_base >= 0xC0000)) {
7595 BNX2X_DEV_INFO("MCP not active\n");
7596 bp->flags |= NO_MCP_FLAG;
7600 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7601 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7602 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7603 BNX2X_ERR("BAD MCP validity signature\n");
7605 if (!BP_NOMCP(bp)) {
7606 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
7607 & DRV_MSG_SEQ_NUMBER_MASK);
7608 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7613 * bnx2x_io_error_detected - called when PCI error is detected
7614 * @pdev: Pointer to PCI device
7615 * @state: The current pci connection state
7617 * This function is called after a PCI bus error affecting
7618 * this device has been detected.
7620 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
7621 pci_channel_state_t state)
7623 struct net_device *dev = pci_get_drvdata(pdev);
7624 struct bnx2x *bp = netdev_priv(dev);
7628 netif_device_detach(dev);
7630 if (state == pci_channel_io_perm_failure) {
7632 return PCI_ERS_RESULT_DISCONNECT;
7635 if (netif_running(dev))
7636 bnx2x_eeh_nic_unload(bp);
7638 pci_disable_device(pdev);
7642 /* Request a slot reset */
7643 return PCI_ERS_RESULT_NEED_RESET;
7647 * bnx2x_io_slot_reset - called after the PCI bus has been reset
7648 * @pdev: Pointer to PCI device
7650 * Restart the card from scratch, as if from a cold-boot.
7652 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
7654 struct net_device *dev = pci_get_drvdata(pdev);
7655 struct bnx2x *bp = netdev_priv(dev);
7659 if (pci_enable_device(pdev)) {
7661 "Cannot re-enable PCI device after reset\n");
7663 return PCI_ERS_RESULT_DISCONNECT;
7666 pci_set_master(pdev);
7667 pci_restore_state(pdev);
7669 if (netif_running(dev))
7670 bnx2x_set_power_state(bp, PCI_D0);
7674 return PCI_ERS_RESULT_RECOVERED;
7678 * bnx2x_io_resume - called when traffic can start flowing again
7679 * @pdev: Pointer to PCI device
7681 * This callback is called when the error recovery driver tells us that
7682 * its OK to resume normal operation.
7684 static void bnx2x_io_resume(struct pci_dev *pdev)
7686 struct net_device *dev = pci_get_drvdata(pdev);
7687 struct bnx2x *bp = netdev_priv(dev);
7689 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
7690 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
7696 bnx2x_eeh_recover(bp);
7698 if (netif_running(dev))
7699 bnx2x_nic_load(bp, LOAD_NORMAL);
7701 netif_device_attach(dev);
7706 static struct pci_error_handlers bnx2x_err_handler = {
7707 .error_detected = bnx2x_io_error_detected,
7708 .slot_reset = bnx2x_io_slot_reset,
7709 .resume = bnx2x_io_resume,
7712 static struct pci_driver bnx2x_pci_driver = {
7713 .name = DRV_MODULE_NAME,
7714 .id_table = bnx2x_pci_tbl,
7715 .probe = bnx2x_init_one,
7716 .remove = __devexit_p(bnx2x_remove_one),
7717 .suspend = bnx2x_suspend,
7718 .resume = bnx2x_resume,
7719 .err_handler = &bnx2x_err_handler,
7722 static int __init bnx2x_init(void)
7726 pr_info("%s", version);
7728 bnx2x_wq = create_singlethread_workqueue("bnx2x");
7729 if (bnx2x_wq == NULL) {
7730 pr_err("Cannot create workqueue\n");
7734 ret = pci_register_driver(&bnx2x_pci_driver);
7736 pr_err("Cannot register driver\n");
7737 destroy_workqueue(bnx2x_wq);
7742 static void __exit bnx2x_cleanup(void)
7744 pci_unregister_driver(&bnx2x_pci_driver);
7746 destroy_workqueue(bnx2x_wq);
7749 module_init(bnx2x_init);
7750 module_exit(bnx2x_cleanup);
7754 /* count denotes the number of new completions we have seen */
7755 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
7757 struct eth_spe *spe;
7759 #ifdef BNX2X_STOP_ON_ERROR
7760 if (unlikely(bp->panic))
7764 spin_lock_bh(&bp->spq_lock);
7765 bp->cnic_spq_pending -= count;
7767 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
7768 bp->cnic_spq_pending++) {
7770 if (!bp->cnic_kwq_pending)
7773 spe = bnx2x_sp_get_next(bp);
7774 *spe = *bp->cnic_kwq_cons;
7776 bp->cnic_kwq_pending--;
7778 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
7779 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
7781 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
7782 bp->cnic_kwq_cons = bp->cnic_kwq;
7784 bp->cnic_kwq_cons++;
7786 bnx2x_sp_prod_update(bp);
7787 spin_unlock_bh(&bp->spq_lock);
7790 static int bnx2x_cnic_sp_queue(struct net_device *dev,
7791 struct kwqe_16 *kwqes[], u32 count)
7793 struct bnx2x *bp = netdev_priv(dev);
7796 #ifdef BNX2X_STOP_ON_ERROR
7797 if (unlikely(bp->panic))
7801 spin_lock_bh(&bp->spq_lock);
7803 for (i = 0; i < count; i++) {
7804 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
7806 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
7809 *bp->cnic_kwq_prod = *spe;
7811 bp->cnic_kwq_pending++;
7813 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
7814 spe->hdr.conn_and_cmd_data, spe->hdr.type,
7815 spe->data.mac_config_addr.hi,
7816 spe->data.mac_config_addr.lo,
7817 bp->cnic_kwq_pending);
7819 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
7820 bp->cnic_kwq_prod = bp->cnic_kwq;
7822 bp->cnic_kwq_prod++;
7825 spin_unlock_bh(&bp->spq_lock);
7827 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
7828 bnx2x_cnic_sp_post(bp, 0);
7833 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7835 struct cnic_ops *c_ops;
7838 mutex_lock(&bp->cnic_mutex);
7839 c_ops = bp->cnic_ops;
7841 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7842 mutex_unlock(&bp->cnic_mutex);
7847 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7849 struct cnic_ops *c_ops;
7853 c_ops = rcu_dereference(bp->cnic_ops);
7855 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7862 * for commands that have no data
7864 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
7866 struct cnic_ctl_info ctl = {0};
7870 return bnx2x_cnic_ctl_send(bp, &ctl);
7873 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
7875 struct cnic_ctl_info ctl;
7877 /* first we tell CNIC and only then we count this as a completion */
7878 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
7879 ctl.data.comp.cid = cid;
7881 bnx2x_cnic_ctl_send_bh(bp, &ctl);
7882 bnx2x_cnic_sp_post(bp, 1);
7885 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7887 struct bnx2x *bp = netdev_priv(dev);
7891 case DRV_CTL_CTXTBL_WR_CMD: {
7892 u32 index = ctl->data.io.offset;
7893 dma_addr_t addr = ctl->data.io.dma_addr;
7895 bnx2x_ilt_wr(bp, index, addr);
7899 case DRV_CTL_COMPLETION_CMD: {
7900 int count = ctl->data.comp.comp_count;
7902 bnx2x_cnic_sp_post(bp, count);
7906 /* rtnl_lock is held. */
7907 case DRV_CTL_START_L2_CMD: {
7908 u32 cli = ctl->data.ring.client_id;
7910 bp->rx_mode_cl_mask |= (1 << cli);
7911 bnx2x_set_storm_rx_mode(bp);
7915 /* rtnl_lock is held. */
7916 case DRV_CTL_STOP_L2_CMD: {
7917 u32 cli = ctl->data.ring.client_id;
7919 bp->rx_mode_cl_mask &= ~(1 << cli);
7920 bnx2x_set_storm_rx_mode(bp);
7925 BNX2X_ERR("unknown command %x\n", ctl->cmd);
7932 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
7934 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7936 if (bp->flags & USING_MSIX_FLAG) {
7937 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
7938 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
7939 cp->irq_arr[0].vector = bp->msix_table[1].vector;
7941 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
7942 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
7944 cp->irq_arr[0].status_blk = bp->cnic_sb;
7945 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
7946 cp->irq_arr[1].status_blk = bp->def_status_blk;
7947 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
7952 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
7955 struct bnx2x *bp = netdev_priv(dev);
7956 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7961 if (atomic_read(&bp->intr_sem) != 0)
7964 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
7968 bp->cnic_kwq_cons = bp->cnic_kwq;
7969 bp->cnic_kwq_prod = bp->cnic_kwq;
7970 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
7972 bp->cnic_spq_pending = 0;
7973 bp->cnic_kwq_pending = 0;
7975 bp->cnic_data = data;
7978 cp->drv_state = CNIC_DRV_STATE_REGD;
7980 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
7982 bnx2x_setup_cnic_irq_info(bp);
7983 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7984 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7985 rcu_assign_pointer(bp->cnic_ops, ops);
7990 static int bnx2x_unregister_cnic(struct net_device *dev)
7992 struct bnx2x *bp = netdev_priv(dev);
7993 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7995 mutex_lock(&bp->cnic_mutex);
7996 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7997 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7998 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8001 rcu_assign_pointer(bp->cnic_ops, NULL);
8002 mutex_unlock(&bp->cnic_mutex);
8004 kfree(bp->cnic_kwq);
8005 bp->cnic_kwq = NULL;
8010 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
8012 struct bnx2x *bp = netdev_priv(dev);
8013 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8015 cp->drv_owner = THIS_MODULE;
8016 cp->chip_id = CHIP_ID(bp);
8017 cp->pdev = bp->pdev;
8018 cp->io_base = bp->regview;
8019 cp->io_base2 = bp->doorbells;
8020 cp->max_kwqe_pending = 8;
8021 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
8022 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
8023 cp->ctx_tbl_len = CNIC_ILT_LINES;
8024 cp->starting_cid = BCM_CNIC_CID_START;
8025 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
8026 cp->drv_ctl = bnx2x_drv_ctl;
8027 cp->drv_register_cnic = bnx2x_register_cnic;
8028 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
8032 EXPORT_SYMBOL(bnx2x_cnic_probe);
8034 #endif /* BCM_CNIC */